From 1481d27782bae76cddc16a7516e998446d4a12b9 Mon Sep 17 00:00:00 2001
From: Jakub Vavřík
Date: Thu, 28 Jan 2021 17:37:47 +0100
Subject: [PATCH] Initial v1.0.0 commit
---
.ci/component_descriptor | 86 +
.ci/pipeline_definitions | 46 +
.ci/prepare_release | 3 +
.ci/set_dependency_version | 3 +
.ci/verify | 17 +
.circleci/config.yaml | 39 +
.dockerignore | 22 +
.github/ISSUE_TEMPLATE/bug.md | 39 +
.github/ISSUE_TEMPLATE/feature.md | 27 +
.github/ISSUE_TEMPLATE/flaking-test.md | 35 +
.github/ISSUE_TEMPLATE/support.md | 14 +
.github/pull_request_template.md | 40 +
.gitignore | 23 +
.golangci.yaml | 7 +
Dockerfile | 13 +
LICENSE.md | 288 +
Makefile | 120 +
NOTICE.md | 15 +
README.md | 59 +
VERSION | 1 +
.../.helmignore | 22 +
.../Chart.yaml | 5 +
.../doc.go | 18 +
.../templates/_helpers.tpl | 36 +
.../configmap-imagevector-overwrite.yaml | 13 +
.../templates/deployment.yaml | 74 +
.../templates/fleet-config.yaml | 7 +
.../templates/priorityclass.yaml | 7 +
.../templates/rbac-shoot-cert-service.yaml | 16 +
.../templates/rbac.yaml | 113 +
.../templates/serviceaccount.yaml | 10 +
.../templates/vpa.yaml | 22 +
.../values.yaml | 40 +
.../app/app.go | 95 +
.../app/options.go | 74 +
.../main.go | 33 +
docs/installation/setup.md | 79 +
docs/usage/register_cluster.md | 51 +
example/00-config.yaml | 5 +
example/10-fake-shoot-controlplane.yaml | 186 +
example/20-crd-cluster.yaml | 23 +
example/20-crd-extension.yaml | 120 +
example/20-crd-issuer.yaml | 45 +
example/20-crd-managedresource.yaml | 36 +
example/25-rbac.yaml | 23 +
example/30-cluster.yaml | 28 +
example/30-extension.yaml | 11 +
example/controller-registration.yaml | 18 +
go.mod | 46 +
go.sum | 1695 +
hack/api-reference/config.json | 24 +
hack/api-reference/config.md | 97 +
hack/component_descriptor | 57 +
hack/tools.go | 32 +
hack/update-codegen.sh | 49 +
hack/update-github-templates.sh | 30 +
pkg/apis/config/doc.go | 5 +
pkg/apis/config/install/install.go | 31 +
pkg/apis/config/loader/loader.go | 58 +
pkg/apis/config/register.go | 37 +
pkg/apis/config/types.go | 26 +
pkg/apis/config/v1alpha1/defaults.go | 9 +
pkg/apis/config/v1alpha1/doc.go | 10 +
pkg/apis/config/v1alpha1/register.go | 40 +
pkg/apis/config/v1alpha1/types.go | 28 +
.../v1alpha1/zz_generated.conversion.go | 78 +
.../config/v1alpha1/zz_generated.deepcopy.go | 69 +
.../config/v1alpha1/zz_generated.defaults.go | 32 +
pkg/apis/config/zz_generated.deepcopy.go | 69 +
.../fleet/clientset/versioned/clientset.go | 97 +
pkg/client/fleet/clientset/versioned/doc.go | 20 +
.../versioned/fake/clientset_generated.go | 82 +
.../fleet/clientset/versioned/fake/doc.go | 20 +
.../clientset/versioned/fake/register.go | 56 +
.../fleet/clientset/versioned/scheme/doc.go | 20 +
.../clientset/versioned/scheme/register.go | 56 +
.../typed/fleet.cattle.io/v1alpha1/bundle.go | 195 +
.../v1alpha1/bundledeployment.go | 195 +
.../v1alpha1/bundlenamespacemapping.go | 178 +
.../typed/fleet.cattle.io/v1alpha1/cluster.go | 195 +
.../fleet.cattle.io/v1alpha1/clustergroup.go | 195 +
.../v1alpha1/clusterregistration.go | 195 +
.../v1alpha1/clusterregistrationtoken.go | 195 +
.../typed/fleet.cattle.io/v1alpha1/content.go | 168 +
.../typed/fleet.cattle.io/v1alpha1/doc.go | 20 +
.../fleet.cattle.io/v1alpha1/fake/doc.go | 20 +
.../v1alpha1/fake/fake_bundle.go | 142 +
.../v1alpha1/fake/fake_bundledeployment.go | 142 +
.../fake/fake_bundlenamespacemapping.go | 130 +
.../v1alpha1/fake/fake_cluster.go | 142 +
.../v1alpha1/fake/fake_clustergroup.go | 142 +
.../v1alpha1/fake/fake_clusterregistration.go | 142 +
.../fake/fake_clusterregistrationtoken.go | 142 +
.../v1alpha1/fake/fake_content.go | 122 +
.../fake/fake_fleet.cattle.io_client.go | 76 +
.../v1alpha1/fake/fake_gitrepo.go | 142 +
.../v1alpha1/fake/fake_gitreporestriction.go | 130 +
.../v1alpha1/fleet.cattle.io_client.go | 134 +
.../v1alpha1/generated_expansion.go | 39 +
.../typed/fleet.cattle.io/v1alpha1/gitrepo.go | 195 +
.../v1alpha1/gitreporestriction.go | 178 +
pkg/cmd/options.go | 110 +
pkg/controller/actuator.go | 204 +
pkg/controller/add.go | 66 +
pkg/controller/config/config.go | 7 +
pkg/controller/fleetmanager.go | 51 +
pkg/controller/healthcheck/add.go | 57 +
pkg/controller/utils.go | 24 +
pkg/imagevector/imagevector.go | 46 +
vendor/github.com/BurntSushi/toml/.gitignore | 5 +
vendor/github.com/BurntSushi/toml/.travis.yml | 15 +
vendor/github.com/BurntSushi/toml/COMPATIBLE | 3 +
vendor/github.com/BurntSushi/toml/COPYING | 21 +
vendor/github.com/BurntSushi/toml/Makefile | 19 +
vendor/github.com/BurntSushi/toml/README.md | 218 +
vendor/github.com/BurntSushi/toml/decode.go | 509 +
.../github.com/BurntSushi/toml/decode_meta.go | 121 +
vendor/github.com/BurntSushi/toml/doc.go | 27 +
vendor/github.com/BurntSushi/toml/encode.go | 568 +
.../BurntSushi/toml/encoding_types.go | 19 +
.../BurntSushi/toml/encoding_types_1.1.go | 18 +
vendor/github.com/BurntSushi/toml/lex.go | 953 +
vendor/github.com/BurntSushi/toml/parse.go | 592 +
vendor/github.com/BurntSushi/toml/session.vim | 1 +
.../github.com/BurntSushi/toml/type_check.go | 91 +
.../github.com/BurntSushi/toml/type_fields.go | 242 +
.../Masterminds/goutils/.travis.yml | 18 +
.../Masterminds/goutils/CHANGELOG.md | 8 +
.../Masterminds/goutils/LICENSE.txt | 202 +
.../github.com/Masterminds/goutils/README.md | 70 +
.../Masterminds/goutils/appveyor.yml | 21 +
.../goutils/cryptorandomstringutils.go | 251 +
.../Masterminds/goutils/randomstringutils.go | 268 +
.../Masterminds/goutils/stringutils.go | 224 +
.../Masterminds/goutils/wordutils.go | 357 +
.../github.com/Masterminds/semver/.travis.yml | 29 +
.../Masterminds/semver/CHANGELOG.md | 109 +
.../github.com/Masterminds/semver/LICENSE.txt | 19 +
vendor/github.com/Masterminds/semver/Makefile | 36 +
.../github.com/Masterminds/semver/README.md | 194 +
.../Masterminds/semver/appveyor.yml | 44 +
.../Masterminds/semver/collection.go | 24 +
.../Masterminds/semver/constraints.go | 423 +
vendor/github.com/Masterminds/semver/doc.go | 115 +
.../github.com/Masterminds/semver/version.go | 425 +
.../Masterminds/semver/version_fuzz.go | 10 +
.../github.com/Masterminds/sprig/.gitignore | 2 +
.../github.com/Masterminds/sprig/.travis.yml | 26 +
.../github.com/Masterminds/sprig/CHANGELOG.md | 282 +
.../github.com/Masterminds/sprig/LICENSE.txt | 20 +
vendor/github.com/Masterminds/sprig/Makefile | 13 +
vendor/github.com/Masterminds/sprig/README.md | 78 +
.../github.com/Masterminds/sprig/appveyor.yml | 26 +
vendor/github.com/Masterminds/sprig/crypto.go | 502 +
vendor/github.com/Masterminds/sprig/date.go | 83 +
.../github.com/Masterminds/sprig/defaults.go | 83 +
vendor/github.com/Masterminds/sprig/dict.go | 119 +
vendor/github.com/Masterminds/sprig/doc.go | 19 +
.../github.com/Masterminds/sprig/functions.go | 306 +
.../github.com/Masterminds/sprig/glide.yaml | 19 +
vendor/github.com/Masterminds/sprig/list.go | 311 +
.../github.com/Masterminds/sprig/network.go | 12 +
.../github.com/Masterminds/sprig/numeric.go | 169 +
.../github.com/Masterminds/sprig/reflect.go | 28 +
vendor/github.com/Masterminds/sprig/regex.go | 35 +
vendor/github.com/Masterminds/sprig/semver.go | 23 +
.../github.com/Masterminds/sprig/strings.go | 233 +
vendor/github.com/Masterminds/sprig/url.go | 66 +
.../github.com/PuerkitoBio/purell/.gitignore | 5 +
.../github.com/PuerkitoBio/purell/.travis.yml | 12 +
vendor/github.com/PuerkitoBio/purell/LICENSE | 12 +
.../github.com/PuerkitoBio/purell/README.md | 188 +
.../github.com/PuerkitoBio/purell/purell.go | 379 +
.../github.com/PuerkitoBio/urlesc/.travis.yml | 15 +
vendor/github.com/PuerkitoBio/urlesc/LICENSE | 27 +
.../github.com/PuerkitoBio/urlesc/README.md | 16 +
.../github.com/PuerkitoBio/urlesc/urlesc.go | 180 +
.../gen-crd-api-reference-docs/.gitignore | 16 +
.../.goreleaser.yml | 24 +
.../gen-crd-api-reference-docs/.travis.yml | 32 +
.../ahmetb/gen-crd-api-reference-docs/LICENSE | 201 +
.../gen-crd-api-reference-docs/README.md | 78 +
.../example-config.json | 28 +
.../ahmetb/gen-crd-api-reference-docs/go.mod | 12 +
.../ahmetb/gen-crd-api-reference-docs/go.sum | 23 +
.../ahmetb/gen-crd-api-reference-docs/main.go | 628 +
vendor/github.com/beorn7/perks/LICENSE | 20 +
.../beorn7/perks/quantile/exampledata.txt | 2388 +
.../beorn7/perks/quantile/stream.go | 316 +
.../github.com/cespare/xxhash/v2/.travis.yml | 8 +
.../github.com/cespare/xxhash/v2/LICENSE.txt | 22 +
vendor/github.com/cespare/xxhash/v2/README.md | 67 +
vendor/github.com/cespare/xxhash/v2/go.mod | 3 +
vendor/github.com/cespare/xxhash/v2/go.sum | 0
vendor/github.com/cespare/xxhash/v2/xxhash.go | 236 +
.../cespare/xxhash/v2/xxhash_amd64.go | 13 +
.../cespare/xxhash/v2/xxhash_amd64.s | 215 +
.../cespare/xxhash/v2/xxhash_other.go | 76 +
.../cespare/xxhash/v2/xxhash_safe.go | 15 +
.../cespare/xxhash/v2/xxhash_unsafe.go | 46 +
.../cyphar/filepath-securejoin/.travis.yml | 19 +
.../cyphar/filepath-securejoin/LICENSE | 28 +
.../cyphar/filepath-securejoin/README.md | 65 +
.../cyphar/filepath-securejoin/VERSION | 1 +
.../cyphar/filepath-securejoin/join.go | 134 +
.../cyphar/filepath-securejoin/vendor.conf | 1 +
.../cyphar/filepath-securejoin/vfs.go | 41 +
vendor/github.com/davecgh/go-spew/LICENSE | 15 +
.../github.com/davecgh/go-spew/spew/bypass.go | 145 +
.../davecgh/go-spew/spew/bypasssafe.go | 38 +
.../github.com/davecgh/go-spew/spew/common.go | 341 +
.../github.com/davecgh/go-spew/spew/config.go | 306 +
vendor/github.com/davecgh/go-spew/spew/doc.go | 211 +
.../github.com/davecgh/go-spew/spew/dump.go | 509 +
.../github.com/davecgh/go-spew/spew/format.go | 419 +
.../github.com/davecgh/go-spew/spew/spew.go | 148 +
.../docker/spdystream/CONTRIBUTING.md | 13 +
vendor/github.com/docker/spdystream/LICENSE | 191 +
.../github.com/docker/spdystream/LICENSE.docs | 425 +
.../github.com/docker/spdystream/MAINTAINERS | 28 +
vendor/github.com/docker/spdystream/README.md | 77 +
.../docker/spdystream/connection.go | 958 +
.../github.com/docker/spdystream/handlers.go | 38 +
.../github.com/docker/spdystream/priority.go | 98 +
.../docker/spdystream/spdy/dictionary.go | 187 +
.../github.com/docker/spdystream/spdy/read.go | 348 +
.../docker/spdystream/spdy/types.go | 275 +
.../docker/spdystream/spdy/write.go | 318 +
vendor/github.com/docker/spdystream/stream.go | 327 +
vendor/github.com/docker/spdystream/utils.go | 16 +
.../github.com/emicklei/go-restful/.gitignore | 70 +
.../emicklei/go-restful/.travis.yml | 6 +
.../github.com/emicklei/go-restful/CHANGES.md | 273 +
vendor/github.com/emicklei/go-restful/LICENSE | 22 +
.../github.com/emicklei/go-restful/Makefile | 7 +
.../github.com/emicklei/go-restful/README.md | 88 +
vendor/github.com/emicklei/go-restful/Srcfile | 1 +
.../emicklei/go-restful/bench_test.sh | 10 +
.../emicklei/go-restful/compress.go | 123 +
.../emicklei/go-restful/compressor_cache.go | 103 +
.../emicklei/go-restful/compressor_pools.go | 91 +
.../emicklei/go-restful/compressors.go | 54 +
.../emicklei/go-restful/constants.go | 30 +
.../emicklei/go-restful/container.go | 374 +
.../emicklei/go-restful/cors_filter.go | 202 +
.../emicklei/go-restful/coverage.sh | 2 +
.../github.com/emicklei/go-restful/curly.go | 164 +
.../emicklei/go-restful/curly_route.go | 54 +
vendor/github.com/emicklei/go-restful/doc.go | 185 +
.../emicklei/go-restful/entity_accessors.go | 162 +
.../github.com/emicklei/go-restful/filter.go | 35 +
vendor/github.com/emicklei/go-restful/json.go | 11 +
.../emicklei/go-restful/jsoniter.go | 12 +
.../github.com/emicklei/go-restful/jsr311.go | 297 +
.../github.com/emicklei/go-restful/log/log.go | 34 +
.../github.com/emicklei/go-restful/logger.go | 32 +
vendor/github.com/emicklei/go-restful/mime.go | 50 +
.../emicklei/go-restful/options_filter.go | 34 +
.../emicklei/go-restful/parameter.go | 143 +
.../emicklei/go-restful/path_expression.go | 74 +
.../emicklei/go-restful/path_processor.go | 63 +
.../github.com/emicklei/go-restful/request.go | 118 +
.../emicklei/go-restful/response.go | 255 +
.../github.com/emicklei/go-restful/route.go | 159 +
.../emicklei/go-restful/route_builder.go | 326 +
.../github.com/emicklei/go-restful/router.go | 20 +
.../emicklei/go-restful/service_error.go | 23 +
.../emicklei/go-restful/web_service.go | 290 +
.../go-restful/web_service_container.go | 39 +
.../github.com/evanphx/json-patch/.travis.yml | 19 +
vendor/github.com/evanphx/json-patch/LICENSE | 25 +
.../github.com/evanphx/json-patch/README.md | 298 +
.../github.com/evanphx/json-patch/errors.go | 38 +
vendor/github.com/evanphx/json-patch/merge.go | 386 +
vendor/github.com/evanphx/json-patch/patch.go | 784 +
vendor/github.com/fatih/color/.travis.yml | 5 +
vendor/github.com/fatih/color/Gopkg.lock | 27 +
vendor/github.com/fatih/color/Gopkg.toml | 30 +
vendor/github.com/fatih/color/LICENSE.md | 20 +
vendor/github.com/fatih/color/README.md | 179 +
vendor/github.com/fatih/color/color.go | 603 +
vendor/github.com/fatih/color/doc.go | 133 +
.../fsnotify/fsnotify/.editorconfig | 12 +
.../fsnotify/fsnotify/.gitattributes | 1 +
.../github.com/fsnotify/fsnotify/.gitignore | 6 +
.../github.com/fsnotify/fsnotify/.travis.yml | 36 +
vendor/github.com/fsnotify/fsnotify/AUTHORS | 52 +
.../github.com/fsnotify/fsnotify/CHANGELOG.md | 317 +
.../fsnotify/fsnotify/CONTRIBUTING.md | 77 +
vendor/github.com/fsnotify/fsnotify/LICENSE | 28 +
vendor/github.com/fsnotify/fsnotify/README.md | 130 +
vendor/github.com/fsnotify/fsnotify/fen.go | 37 +
.../github.com/fsnotify/fsnotify/fsnotify.go | 68 +
vendor/github.com/fsnotify/fsnotify/go.mod | 5 +
vendor/github.com/fsnotify/fsnotify/go.sum | 2 +
.../github.com/fsnotify/fsnotify/inotify.go | 337 +
.../fsnotify/fsnotify/inotify_poller.go | 187 +
vendor/github.com/fsnotify/fsnotify/kqueue.go | 521 +
.../fsnotify/fsnotify/open_mode_bsd.go | 11 +
.../fsnotify/fsnotify/open_mode_darwin.go | 12 +
.../github.com/fsnotify/fsnotify/windows.go | 561 +
.../github.com/gardener/etcd-druid/LICENSE.md | 289 +
.../github.com/gardener/etcd-druid/NOTICE.md | 3 +
.../etcd-druid/api/v1alpha1/etcd_types.go | 324 +
.../api/v1alpha1/groupversion_info.go | 34 +
.../api/v1alpha1/zz_generated.deepcopy.go | 404 +
.../external-dns-management/LICENSE.md | 320 +
.../external-dns-management/NOTICE.md | 2 +
.../pkg/apis/dns/register.go | 25 +
.../pkg/apis/dns/v1alpha1/dnsannotation.go | 81 +
.../pkg/apis/dns/v1alpha1/dnsentry.go | 108 +
.../pkg/apis/dns/v1alpha1/dnsowner.go | 73 +
.../pkg/apis/dns/v1alpha1/dnsprovider.go | 105 +
.../pkg/apis/dns/v1alpha1/doc.go | 21 +
.../pkg/apis/dns/v1alpha1/register.go | 75 +
.../pkg/apis/dns/v1alpha1/state.go | 24 +
.../dns/v1alpha1/zz_generated.deepcopy.go | 606 +
.../gardener-resource-manager/LICENSE.md | 288 +
.../gardener-resource-manager/NOTICE.md | 15 +
.../pkg/apis/resources/register.go | 18 +
.../pkg/apis/resources/v1alpha1/doc.go | 24 +
.../pkg/apis/resources/v1alpha1/register.go | 51 +
.../pkg/apis/resources/v1alpha1/types.go | 170 +
.../v1alpha1/zz_generated.deepcopy.go | 231 +
.../pkg/manager/managedresources.go | 120 +
.../pkg/manager/managedsecrets.go | 124 +
.../gardener/.github/ISSUE_TEMPLATE/bug.md | 38 +
.../gardener/.github/ISSUE_TEMPLATE/doc.go | 16 +
.../.github/ISSUE_TEMPLATE/feature.md | 27 +
.../.github/ISSUE_TEMPLATE/flaking-test.md | 35 +
.../.github/ISSUE_TEMPLATE/support.md | 14 +
.../gardener/gardener/.github/doc.go | 16 +
.../gardener/.github/pull_request_template.md | 40 +
.../github.com/gardener/gardener/LICENSE.md | 714 +
vendor/github.com/gardener/gardener/NOTICE.md | 33 +
.../extensions/pkg/controller/cluster.go | 35 +
.../extensions/pkg/controller/cmd/cmd.go | 34 +
.../extensions/pkg/controller/cmd/options.go | 396 +
.../pkg/controller/cmd/reconciler_options.go | 60 +
.../pkg/controller/error/requeue_error.go | 37 +
.../pkg/controller/extension/actuator.go | 33 +
.../pkg/controller/extension/mapper.go | 29 +
.../pkg/controller/extension/reconciler.go | 320 +
.../pkg/controller/healthcheck/actuator.go | 137 +
.../controller/healthcheck/config/types.go | 25 +
.../pkg/controller/healthcheck/controller.go | 191 +
.../healthcheck/healtcheck_actuator.go | 284 +
.../pkg/controller/healthcheck/inject.go | 49 +
.../pkg/controller/healthcheck/reconciler.go | 304 +
.../gardener/extensions/pkg/controller/log.go | 95 +
.../pkg/controller/managedresources.go | 44 +
.../extensions/pkg/controller/reconciler.go | 76 +
.../extensions/pkg/controller/shoot.go | 98 +
.../extensions/pkg/controller/status.go | 53 +
.../extensions/pkg/controller/utils.go | 257 +
.../extensions/pkg/handler/enqueue_mapped.go | 116 +
.../gardener/extensions/pkg/handler/mapper.go | 94 +
.../gardener/extensions/pkg/inject/inject.go | 68 +
.../gardener/extensions/pkg/log/log.go | 42 +
.../extensions/pkg/predicate/mapper.go | 105 +
.../extensions/pkg/predicate/predicate.go | 298 +
.../gardener/extensions/pkg/util/clientset.go | 63 +
.../extensions/pkg/util/serialization.go | 43 +
.../gardener/extensions/pkg/util/shoot.go | 128 +
.../extensions/pkg/util/shoot_clients.go | 130 +
.../gardener/hack/.ci/component_descriptor | 56 +
.../gardener/gardener/hack/.ci/doc.go | 16 +
.../gardener/hack/.ci/prepare_release | 76 +
.../gardener/hack/.ci/set_dependency_version | 125 +
.../gardener/hack/LICENSE_BOILERPLATE.txt | 15 +
.../hack/api-reference/template/members.tpl | 48 +
.../hack/api-reference/template/pkg.tpl | 48 +
.../hack/api-reference/template/tools.go | 18 +
.../hack/api-reference/template/type.tpl | 58 +
.../gardener/gardener/hack/check-charts.sh | 36 +
.../gardener/gardener/hack/check-generate.sh | 126 +
.../gardener/gardener/hack/check.sh | 50 +
.../gardener/gardener/hack/clean.sh | 24 +
.../gardener/gardener/hack/format.sh | 21 +
.../hack/generate-controller-registration.sh | 105 +
.../gardener/gardener/hack/generate.sh | 21 +
.../gardener/hack/get-build-ld-flags.sh | 33 +
.../gardener/gardener/hack/hook-me.sh | 306 +
.../gardener/hack/install-requirements.sh | 45 +
.../gardener/gardener/hack/install.sh | 25 +
.../gardener/gardener/hack/setup-envtest.sh | 109 +
.../gardener/hack/test-cover-clean.sh | 21 +
.../gardener/gardener/hack/test-cover.sh | 35 +
.../gardener/gardener/hack/test-prometheus.sh | 28 +
.../github.com/gardener/gardener/hack/test.sh | 22 +
.../gardener/gardener/hack/tools.go | 29 +
.../gardener/gardener/hack/update-codegen.sh | 190 +
.../gardener/gardener/hack/update-protobuf.sh | 65 +
.../gardener/pkg/api/core/accessor.go | 35 +
.../gardener/pkg/api/extensions/accessor.go | 263 +
.../gardener/pkg/api/extensions/utils.go | 40 +
.../gardener/gardener/pkg/apis/core/doc.go | 19 +
.../gardener/pkg/apis/core/field_constants.go | 44 +
.../gardener/pkg/apis/core/install/install.go | 33 +
.../gardener/pkg/apis/core/register.go | 74 +
.../gardener/gardener/pkg/apis/core/types.go | 30 +
.../pkg/apis/core/types_backupbucket.go | 83 +
.../pkg/apis/core/types_backupentry.go | 68 +
.../pkg/apis/core/types_cloudprofile.go | 198 +
.../gardener/pkg/apis/core/types_common.go | 113 +
.../apis/core/types_controllerinstallation.go | 76 +
.../apis/core/types_controllerregistration.go | 99 +
.../gardener/pkg/apis/core/types_plant.go | 104 +
.../gardener/pkg/apis/core/types_project.go | 143 +
.../gardener/pkg/apis/core/types_quota.go | 67 +
.../pkg/apis/core/types_secretbinding.go | 44 +
.../gardener/pkg/apis/core/types_seed.go | 284 +
.../gardener/pkg/apis/core/types_shoot.go | 967 +
.../pkg/apis/core/types_shootstate.go | 86 +
.../gardener/pkg/apis/core/types_utils.go | 61 +
.../v1alpha1/constants/types_constants.go | 258 +
.../pkg/apis/core/v1alpha1/conversions.go | 410 +
.../pkg/apis/core/v1alpha1/defaults.go | 361 +
.../gardener/pkg/apis/core/v1alpha1/doc.go | 24 +
.../pkg/apis/core/v1alpha1/generated.pb.go | 40856 ++++++++++
.../pkg/apis/core/v1alpha1/generated.proto | 2344 +
.../core/v1alpha1/helper/condition_builder.go | 155 +
.../pkg/apis/core/v1alpha1/helper/helper.go | 781 +
.../core/v1alpha1/helper/shootstate_list.go | 142 +
.../pkg/apis/core/v1alpha1/register.go | 77 +
.../gardener/pkg/apis/core/v1alpha1/types.go | 21 +
.../apis/core/v1alpha1/types_backupbucket.go | 91 +
.../apis/core/v1alpha1/types_backupentry.go | 75 +
.../apis/core/v1alpha1/types_cloudprofile.go | 226 +
.../pkg/apis/core/v1alpha1/types_common.go | 141 +
.../v1alpha1/types_controllerinstallation.go | 80 +
.../v1alpha1/types_controllerregistration.go | 108 +
.../pkg/apis/core/v1alpha1/types_plant.go | 112 +
.../pkg/apis/core/v1alpha1/types_project.go | 174 +
.../pkg/apis/core/v1alpha1/types_quota.go | 56 +
.../apis/core/v1alpha1/types_secretbinding.go | 47 +
.../pkg/apis/core/v1alpha1/types_seed.go | 307 +
.../pkg/apis/core/v1alpha1/types_shoot.go | 1181 +
.../apis/core/v1alpha1/types_shootstate.go | 100 +
.../pkg/apis/core/v1alpha1/types_utils.go | 72 +
.../core/v1alpha1/zz_generated.conversion.go | 5035 ++
.../core/v1alpha1/zz_generated.deepcopy.go | 4109 +
.../core/v1alpha1/zz_generated.defaults.go | 138 +
.../core/v1beta1/constants/types_constants.go | 330 +
.../pkg/apis/core/v1beta1/constants/utils.go | 24 +
.../pkg/apis/core/v1beta1/conversions.go | 203 +
.../pkg/apis/core/v1beta1/defaults.go | 361 +
.../gardener/pkg/apis/core/v1beta1/doc.go | 26 +
.../pkg/apis/core/v1beta1/generated.pb.go | 39570 +++++++++
.../pkg/apis/core/v1beta1/generated.proto | 2267 +
.../core/v1beta1/helper/condition_builder.go | 155 +
.../pkg/apis/core/v1beta1/helper/errors.go | 206 +
.../pkg/apis/core/v1beta1/helper/helper.go | 1198 +
.../pkg/apis/core/v1beta1/register.go | 75 +
.../gardener/pkg/apis/core/v1beta1/types.go | 21 +
.../apis/core/v1beta1/types_backupbucket.go | 91 +
.../apis/core/v1beta1/types_backupentry.go | 75 +
.../apis/core/v1beta1/types_cloudprofile.go | 226 +
.../pkg/apis/core/v1beta1/types_common.go | 141 +
.../v1beta1/types_controllerinstallation.go | 80 +
.../v1beta1/types_controllerregistration.go | 108 +
.../pkg/apis/core/v1beta1/types_plant.go | 112 +
.../pkg/apis/core/v1beta1/types_project.go | 174 +
.../pkg/apis/core/v1beta1/types_quota.go | 56 +
.../apis/core/v1beta1/types_secretbinding.go | 47 +
.../pkg/apis/core/v1beta1/types_seed.go | 320 +
.../pkg/apis/core/v1beta1/types_shoot.go | 1179 +
.../pkg/apis/core/v1beta1/types_utils.go | 72 +
.../core/v1beta1/zz_generated.conversion.go | 4874 ++
.../core/v1beta1/zz_generated.deepcopy.go | 3949 +
.../core/v1beta1/zz_generated.defaults.go | 138 +
.../pkg/apis/core/zz_generated.deepcopy.go | 4112 +
.../gardener/pkg/apis/extensions/register.go | 19 +
.../pkg/apis/extensions/v1alpha1/doc.go | 21 +
.../pkg/apis/extensions/v1alpha1/register.go | 69 +
.../pkg/apis/extensions/v1alpha1/types.go | 87 +
.../extensions/v1alpha1/types_backupbucket.go | 81 +
.../extensions/v1alpha1/types_backupentry.go | 84 +
.../apis/extensions/v1alpha1/types_cluster.go | 58 +
.../v1alpha1/types_containerruntime.go | 87 +
.../extensions/v1alpha1/types_controlplane.go | 97 +
.../extensions/v1alpha1/types_defaults.go | 119 +
.../extensions/v1alpha1/types_extension.go | 70 +
.../v1alpha1/types_infrastructure.go | 84 +
.../apis/extensions/v1alpha1/types_network.go | 80 +
.../v1alpha1/types_operatingsystemconfig.go | 223 +
.../apis/extensions/v1alpha1/types_worker.go | 199 +
.../v1alpha1/zz_generated.deepcopy.go | 1475 +
.../gardener/pkg/chartrenderer/default.go | 197 +
.../gardener/pkg/chartrenderer/factory.go | 37 +
.../gardener/pkg/chartrenderer/renderer.go | 32 +
.../gardener/pkg/chartrenderer/sorter.go | 121 +
.../core/clientset/versioned/clientset.go | 111 +
.../client/core/clientset/versioned/doc.go | 20 +
.../core/clientset/versioned/scheme/doc.go | 20 +
.../clientset/versioned/scheme/register.go | 58 +
.../typed/core/v1alpha1/backupbucket.go | 184 +
.../typed/core/v1alpha1/backupentry.go | 195 +
.../typed/core/v1alpha1/cloudprofile.go | 168 +
.../core/v1alpha1/controllerinstallation.go | 184 +
.../core/v1alpha1/controllerregistration.go | 168 +
.../typed/core/v1alpha1/core_client.go | 144 +
.../versioned/typed/core/v1alpha1/doc.go | 20 +
.../core/v1alpha1/generated_expansion.go | 43 +
.../versioned/typed/core/v1alpha1/plant.go | 195 +
.../versioned/typed/core/v1alpha1/project.go | 184 +
.../versioned/typed/core/v1alpha1/quota.go | 178 +
.../typed/core/v1alpha1/secretbinding.go | 178 +
.../versioned/typed/core/v1alpha1/seed.go | 184 +
.../versioned/typed/core/v1alpha1/shoot.go | 195 +
.../typed/core/v1alpha1/shootstate.go | 178 +
.../typed/core/v1beta1/backupbucket.go | 184 +
.../typed/core/v1beta1/backupentry.go | 195 +
.../typed/core/v1beta1/cloudprofile.go | 168 +
.../core/v1beta1/controllerinstallation.go | 184 +
.../core/v1beta1/controllerregistration.go | 168 +
.../typed/core/v1beta1/core_client.go | 139 +
.../versioned/typed/core/v1beta1/doc.go | 20 +
.../typed/core/v1beta1/generated_expansion.go | 41 +
.../versioned/typed/core/v1beta1/plant.go | 195 +
.../versioned/typed/core/v1beta1/project.go | 184 +
.../versioned/typed/core/v1beta1/quota.go | 178 +
.../typed/core/v1beta1/secretbinding.go | 178 +
.../versioned/typed/core/v1beta1/seed.go | 184 +
.../versioned/typed/core/v1beta1/shoot.go | 195 +
.../core/listers/core/v1beta1/backupbucket.go | 68 +
.../core/listers/core/v1beta1/backupentry.go | 99 +
.../core/listers/core/v1beta1/cloudprofile.go | 68 +
.../core/v1beta1/controllerinstallation.go | 68 +
.../core/v1beta1/controllerregistration.go | 68 +
.../core/v1beta1/expansion_generated.go | 83 +
.../client/core/listers/core/v1beta1/plant.go | 99 +
.../core/listers/core/v1beta1/project.go | 68 +
.../client/core/listers/core/v1beta1/quota.go | 99 +
.../listers/core/v1beta1/secretbinding.go | 99 +
.../client/core/listers/core/v1beta1/seed.go | 68 +
.../client/core/listers/core/v1beta1/shoot.go | 99 +
.../clientset/versioned/scheme/doc.go | 20 +
.../clientset/versioned/scheme/register.go | 56 +
.../pkg/client/kubernetes/admissionplugins.go | 94 +
.../gardener/pkg/client/kubernetes/applier.go | 432 +
.../pkg/client/kubernetes/chartapplier.go | 124 +
.../pkg/client/kubernetes/chartoptions.go | 125 +
.../gardener/pkg/client/kubernetes/client.go | 319 +
.../pkg/client/kubernetes/clientset.go | 176 +
.../pkg/client/kubernetes/deployments.go | 64 +
.../pkg/client/kubernetes/manifestoptions.go | 27 +
.../gardener/pkg/client/kubernetes/options.go | 101 +
.../gardener/pkg/client/kubernetes/pods.go | 171 +
.../pkg/client/kubernetes/runtime_client.go | 158 +
.../gardener/pkg/client/kubernetes/scaling.go | 72 +
.../gardener/pkg/client/kubernetes/types.go | 179 +
.../pkg/controllerutils/associations.go | 163 +
.../pkg/controllerutils/finalizers.go | 101 +
.../pkg/controllerutils/miscellaneous.go | 122 +
.../pkg/controllerutils/operations.go | 33 +
.../gardener/pkg/controllerutils/pointers.go | 23 +
.../pkg/controllerutils/seedfilter.go | 154 +
.../gardener/pkg/controllerutils/worker.go | 133 +
.../gardener/pkg/extensions/cluster.go | 153 +
.../gardener/pkg/gardenlet/apis/config/doc.go | 18 +
.../gardenlet/apis/config/helper/helpers.go | 40 +
.../pkg/gardenlet/apis/config/register.go | 51 +
.../pkg/gardenlet/apis/config/types.go | 363 +
.../apis/config/zz_generated.deepcopy.go | 858 +
.../gardener/gardener/pkg/logger/logger.go | 98 +
.../gardener/pkg/mock/go/context/doc.go | 39 +
.../gardener/pkg/mock/go/context/funcs.go | 86 +
.../gardener/pkg/mock/go/context/mocks.go | 92 +
.../pkg/operation/common/extensions.go | 568 +
.../pkg/operation/common/managedresources.go | 106 +
.../pkg/operation/common/network_policies.go | 118 +
.../gardener/pkg/operation/common/types.go | 507 +
.../gardener/pkg/operation/common/utils.go | 779 +
.../gardener/pkg/utils/chart/chart.go | 203 +
.../gardener/gardener/pkg/utils/checksums.go | 48 +
.../gardener/pkg/utils/context/context.go | 43 +
.../gardener/pkg/utils/context/types.go | 26 +
.../gardener/gardener/pkg/utils/encoding.go | 184 +
.../gardener/pkg/utils/errors/errors.go | 262 +
.../gardener/pkg/utils/errors/multierror.go | 43 +
.../gardener/gardener/pkg/utils/flow/flow.go | 387 +
.../gardener/gardener/pkg/utils/flow/graph.go | 101 +
.../pkg/utils/flow/progress_reporter.go | 32 +
.../utils/flow/progress_reporter_delaying.go | 117 +
.../utils/flow/progress_reporter_immediate.go | 36 +
.../gardener/pkg/utils/flow/taskfn.go | 189 +
.../gardener/pkg/utils/flow/taskid.go | 155 +
.../pkg/utils/imagevector/imagevector.go | 328 +
.../imagevector/imagevector_components.go | 57 +
.../gardener/pkg/utils/imagevector/types.go | 59 +
.../gardener/pkg/utils/infodata/infodata.go | 89 +
.../gardener/pkg/utils/infodata/types.go | 50 +
.../pkg/utils/kubernetes/bootstrap_token.go | 80 +
.../kubernetes/controllerinstallation.go | 112 +
.../kubernetes/controllerregistration.go | 92 +
.../pkg/utils/kubernetes/daemonset.go | 102 +
.../pkg/utils/kubernetes/deployment.go | 140 +
.../gardener/pkg/utils/kubernetes/etcd.go | 103 +
.../pkg/utils/kubernetes/health/and.go | 35 +
.../pkg/utils/kubernetes/health/health.go | 425 +
.../pkg/utils/kubernetes/health/pod_health.go | 53 +
.../pkg/utils/kubernetes/kubernetes.go | 539 +
.../pkg/utils/kubernetes/leaderelection.go | 68 +
.../pkg/utils/kubernetes/namespace.go | 90 +
.../gardener/pkg/utils/kubernetes/node.go | 59 +
.../gardener/pkg/utils/kubernetes/object.go | 45 +
.../gardener/pkg/utils/kubernetes/patch.go | 119 +
.../gardener/pkg/utils/kubernetes/project.go | 91 +
.../gardener/pkg/utils/kubernetes/seed.go | 116 +
.../gardener/pkg/utils/kubernetes/service.go | 31 +
.../gardener/pkg/utils/kubernetes/shoot.go | 108 +
.../gardener/pkg/utils/kubernetes/sorter.go | 79 +
.../pkg/utils/kubernetes/statefulset.go | 102 +
.../gardener/pkg/utils/kubernetes/update.go | 95 +
.../gardener/pkg/utils/kubernetes/worker.go | 103 +
.../gardener/gardener/pkg/utils/labels.go | 28 +
.../managedresources/managedresources.go | 149 +
.../pkg/utils/managedresources/registry.go | 142 +
.../gardener/pkg/utils/miscellaneous.go | 145 +
.../gardener/gardener/pkg/utils/object.go | 180 +
.../gardener/gardener/pkg/utils/random.go | 65 +
.../gardener/pkg/utils/retry/alias.go | 26 +
.../gardener/pkg/utils/retry/retry.go | 211 +
.../gardener/pkg/utils/retry/types.go | 75 +
.../gardener/pkg/utils/secrets/basic_auth.go | 165 +
.../pkg/utils/secrets/basic_auth_infodata.go | 68 +
.../pkg/utils/secrets/certificate_infodata.go | 70 +
.../pkg/utils/secrets/certificates.go | 474 +
.../pkg/utils/secrets/control_plane.go | 252 +
.../gardener/pkg/utils/secrets/generate.go | 107 +
.../pkg/utils/secrets/private_key_infodata.go | 68 +
.../pkg/utils/secrets/rsa_private_key.go | 151 +
.../gardener/pkg/utils/secrets/secrets.go | 100 +
.../pkg/utils/secrets/static_token.go | 213 +
.../utils/secrets/static_token_infodata.go | 82 +
.../gardener/pkg/utils/secrets/types.go | 35 +
.../gardener/pkg/utils/secrets/vpn_tlsauth.go | 117 +
.../gardener/pkg/utils/template_engine.go | 100 +
.../gardener/gardener/pkg/utils/timewindow.go | 255 +
.../gardener/pkg/utils/version/version.go | 59 +
.../gardener/gardener/pkg/version/version.go | 65 +
.../gardener/hvpa-controller/LICENSE.md | 296 +
.../api/v1alpha1/groupversion_info.go | 68 +
.../api/v1alpha1/hvpa_types.go | 352 +
.../api/v1alpha1/hvpa_webhook.go | 75 +
.../api/v1alpha1/zz_generated.deepcopy.go | 498 +
vendor/github.com/ghodss/yaml/.gitignore | 20 +
vendor/github.com/ghodss/yaml/.travis.yml | 7 +
vendor/github.com/ghodss/yaml/LICENSE | 50 +
vendor/github.com/ghodss/yaml/README.md | 121 +
vendor/github.com/ghodss/yaml/fields.go | 501 +
vendor/github.com/ghodss/yaml/yaml.go | 277 +
vendor/github.com/go-logr/logr/LICENSE | 201 +
vendor/github.com/go-logr/logr/README.md | 183 +
vendor/github.com/go-logr/logr/discard.go | 35 +
vendor/github.com/go-logr/logr/go.mod | 3 +
vendor/github.com/go-logr/logr/logr.go | 222 +
vendor/github.com/go-logr/zapr/.gitignore | 3 +
vendor/github.com/go-logr/zapr/Gopkg.lock | 52 +
vendor/github.com/go-logr/zapr/Gopkg.toml | 38 +
vendor/github.com/go-logr/zapr/LICENSE | 201 +
vendor/github.com/go-logr/zapr/README.md | 45 +
vendor/github.com/go-logr/zapr/go.mod | 10 +
vendor/github.com/go-logr/zapr/zapr.go | 167 +
.../go-openapi/jsonpointer/.editorconfig | 26 +
.../go-openapi/jsonpointer/.gitignore | 1 +
.../go-openapi/jsonpointer/.travis.yml | 15 +
.../go-openapi/jsonpointer/CODE_OF_CONDUCT.md | 74 +
.../github.com/go-openapi/jsonpointer/LICENSE | 202 +
.../go-openapi/jsonpointer/README.md | 15 +
.../github.com/go-openapi/jsonpointer/go.mod | 9 +
.../github.com/go-openapi/jsonpointer/go.sum | 24 +
.../go-openapi/jsonpointer/pointer.go | 390 +
.../go-openapi/jsonreference/.gitignore | 1 +
.../go-openapi/jsonreference/.travis.yml | 15 +
.../jsonreference/CODE_OF_CONDUCT.md | 74 +
.../go-openapi/jsonreference/LICENSE | 202 +
.../go-openapi/jsonreference/README.md | 15 +
.../go-openapi/jsonreference/go.mod | 12 +
.../go-openapi/jsonreference/go.sum | 44 +
.../go-openapi/jsonreference/reference.go | 156 +
.../github.com/go-openapi/spec/.editorconfig | 26 +
vendor/github.com/go-openapi/spec/.gitignore | 2 +
.../github.com/go-openapi/spec/.golangci.yml | 28 +
vendor/github.com/go-openapi/spec/.travis.yml | 15 +
.../go-openapi/spec/CODE_OF_CONDUCT.md | 74 +
vendor/github.com/go-openapi/spec/LICENSE | 202 +
vendor/github.com/go-openapi/spec/README.md | 10 +
vendor/github.com/go-openapi/spec/bindata.go | 297 +
vendor/github.com/go-openapi/spec/cache.go | 60 +
.../go-openapi/spec/contact_info.go | 24 +
vendor/github.com/go-openapi/spec/debug.go | 47 +
vendor/github.com/go-openapi/spec/expander.go | 651 +
.../go-openapi/spec/external_docs.go | 24 +
vendor/github.com/go-openapi/spec/go.mod | 12 +
vendor/github.com/go-openapi/spec/go.sum | 49 +
vendor/github.com/go-openapi/spec/header.go | 197 +
vendor/github.com/go-openapi/spec/info.go | 165 +
vendor/github.com/go-openapi/spec/items.go | 244 +
vendor/github.com/go-openapi/spec/license.go | 23 +
.../github.com/go-openapi/spec/normalizer.go | 152 +
.../github.com/go-openapi/spec/operation.go | 398 +
.../github.com/go-openapi/spec/parameter.go | 321 +
.../github.com/go-openapi/spec/path_item.go | 87 +
vendor/github.com/go-openapi/spec/paths.go | 97 +
vendor/github.com/go-openapi/spec/ref.go | 193 +
vendor/github.com/go-openapi/spec/response.go | 131 +
.../github.com/go-openapi/spec/responses.go | 127 +
vendor/github.com/go-openapi/spec/schema.go | 596 +
.../go-openapi/spec/schema_loader.go | 271 +
.../go-openapi/spec/security_scheme.go | 140 +
vendor/github.com/go-openapi/spec/spec.go | 86 +
vendor/github.com/go-openapi/spec/swagger.go | 448 +
vendor/github.com/go-openapi/spec/tag.go | 75 +
vendor/github.com/go-openapi/spec/unused.go | 174 +
.../github.com/go-openapi/spec/xml_object.go | 68 +
.../github.com/go-openapi/swag/.editorconfig | 26 +
vendor/github.com/go-openapi/swag/.gitignore | 4 +
.../github.com/go-openapi/swag/.golangci.yml | 22 +
vendor/github.com/go-openapi/swag/.travis.yml | 15 +
.../go-openapi/swag/CODE_OF_CONDUCT.md | 74 +
vendor/github.com/go-openapi/swag/LICENSE | 202 +
vendor/github.com/go-openapi/swag/README.md | 22 +
vendor/github.com/go-openapi/swag/convert.go | 208 +
.../go-openapi/swag/convert_types.go | 595 +
vendor/github.com/go-openapi/swag/doc.go | 32 +
vendor/github.com/go-openapi/swag/go.mod | 14 +
vendor/github.com/go-openapi/swag/go.sum | 20 +
vendor/github.com/go-openapi/swag/json.go | 312 +
vendor/github.com/go-openapi/swag/loading.go | 80 +
.../github.com/go-openapi/swag/name_lexem.go | 87 +
vendor/github.com/go-openapi/swag/net.go | 38 +
vendor/github.com/go-openapi/swag/path.go | 59 +
.../github.com/go-openapi/swag/post_go18.go | 23 +
.../github.com/go-openapi/swag/post_go19.go | 67 +
vendor/github.com/go-openapi/swag/pre_go18.go | 23 +
vendor/github.com/go-openapi/swag/pre_go19.go | 69 +
vendor/github.com/go-openapi/swag/split.go | 262 +
vendor/github.com/go-openapi/swag/util.go | 385 +
vendor/github.com/go-openapi/swag/yaml.go | 246 +
vendor/github.com/gobuffalo/flect/.gitignore | 29 +
.../gobuffalo/flect/.gometalinter.json | 3 +
vendor/github.com/gobuffalo/flect/LICENSE | 21 +
vendor/github.com/gobuffalo/flect/Makefile | 61 +
vendor/github.com/gobuffalo/flect/README.md | 36 +
.../github.com/gobuffalo/flect/SHOULDERS.md | 10 +
vendor/github.com/gobuffalo/flect/acronyms.go | 152 +
.../gobuffalo/flect/azure-pipelines.yml | 71 +
.../gobuffalo/flect/azure-tests.yml | 19 +
vendor/github.com/gobuffalo/flect/camelize.go | 48 +
.../github.com/gobuffalo/flect/capitalize.go | 27 +
.../github.com/gobuffalo/flect/custom_data.go | 83 +
.../github.com/gobuffalo/flect/dasherize.go | 34 +
vendor/github.com/gobuffalo/flect/flect.go | 43 +
vendor/github.com/gobuffalo/flect/go.mod | 8 +
vendor/github.com/gobuffalo/flect/go.sum | 9 +
vendor/github.com/gobuffalo/flect/humanize.go | 35 +
vendor/github.com/gobuffalo/flect/ident.go | 106 +
.../github.com/gobuffalo/flect/lower_upper.go | 13 +
.../github.com/gobuffalo/flect/ordinalize.go | 43 +
.../github.com/gobuffalo/flect/pascalize.go | 25 +
.../gobuffalo/flect/plural_rules.go | 284 +
.../github.com/gobuffalo/flect/pluralize.go | 49 +
vendor/github.com/gobuffalo/flect/rule.go | 10 +
.../gobuffalo/flect/singular_rules.go | 23 +
.../github.com/gobuffalo/flect/singularize.go | 47 +
vendor/github.com/gobuffalo/flect/titleize.go | 30 +
.../github.com/gobuffalo/flect/underscore.go | 34 +
vendor/github.com/gobuffalo/flect/version.go | 4 +
vendor/github.com/gobuffalo/logger/.gitignore | 29 +
vendor/github.com/gobuffalo/logger/LICENSE | 21 +
vendor/github.com/gobuffalo/logger/Makefile | 61 +
vendor/github.com/gobuffalo/logger/README.md | 22 +
.../github.com/gobuffalo/logger/SHOULDERS.md | 18 +
.../github.com/gobuffalo/logger/formatter.go | 154 +
vendor/github.com/gobuffalo/logger/go.mod | 8 +
vendor/github.com/gobuffalo/logger/go.sum | 20 +
vendor/github.com/gobuffalo/logger/level.go | 25 +
vendor/github.com/gobuffalo/logger/logger.go | 67 +
vendor/github.com/gobuffalo/logger/logrus.go | 34 +
vendor/github.com/gobuffalo/logger/outable.go | 8 +
.../gobuffalo/logger/terminal_check.go | 19 +
.../logger/terminal_check_appengine.go | 11 +
vendor/github.com/gobuffalo/logger/version.go | 4 +
vendor/github.com/gobuffalo/packd/.gitignore | 29 +
vendor/github.com/gobuffalo/packd/LICENSE | 21 +
vendor/github.com/gobuffalo/packd/Makefile | 61 +
vendor/github.com/gobuffalo/packd/README.md | 24 +
.../github.com/gobuffalo/packd/SHOULDERS.md | 10 +
vendor/github.com/gobuffalo/packd/file.go | 126 +
.../github.com/gobuffalo/packd/file_info.go | 39 +
vendor/github.com/gobuffalo/packd/go.mod | 5 +
vendor/github.com/gobuffalo/packd/go.sum | 11 +
.../github.com/gobuffalo/packd/interfaces.go | 83 +
.../github.com/markbates/errx/.gitignore | 29 +
.../takeon/github.com/markbates/errx/LICENSE | 21 +
.../takeon/github.com/markbates/errx/Makefile | 61 +
.../github.com/markbates/errx/SHOULDERS.md | 6 +
.../markbates/errx/azure-pipelines.yml | 71 +
.../github.com/markbates/errx/azure-tests.yml | 19 +
.../takeon/github.com/markbates/errx/errx.go | 23 +
.../github.com/markbates/errx/version.go | 4 +
vendor/github.com/gobuffalo/packd/map.go | 70 +
.../github.com/gobuffalo/packd/memory_box.go | 156 +
.../github.com/gobuffalo/packd/skip_walker.go | 43 +
vendor/github.com/gobuffalo/packd/version.go | 4 +
.../gobuffalo/packr/v2/.gometalinter.json | 3 +
.../gobuffalo/packr/v2/.goreleaser.yml | 42 +
.../gobuffalo/packr/v2/.goreleaser.yml.plush | 39 +
.../github.com/gobuffalo/packr/v2/LICENSE.txt | 8 +
vendor/github.com/gobuffalo/packr/v2/Makefile | 46 +
.../github.com/gobuffalo/packr/v2/README.md | 239 +
.../gobuffalo/packr/v2/SHOULDERS.md | 32 +
vendor/github.com/gobuffalo/packr/v2/box.go | 240 +
.../github.com/gobuffalo/packr/v2/box_map.go | 73 +
.../gobuffalo/packr/v2/deprecated.go | 79 +
.../github.com/gobuffalo/packr/v2/dirs_map.go | 82 +
.../gobuffalo/packr/v2/file/file.go | 32 +
.../gobuffalo/packr/v2/file/info.go | 38 +
.../gobuffalo/packr/v2/file/resolver/disk.go | 111 +
.../v2/file/resolver/encoding/hex/hex.go | 314 +
.../packr/v2/file/resolver/hex_gzip.go | 112 +
.../gobuffalo/packr/v2/file/resolver/ident.go | 21 +
.../packr/v2/file/resolver/in_memory.go | 63 +
.../packr/v2/file/resolver/packable.go | 7 +
.../packr/v2/file/resolver/resolver.go | 33 +
vendor/github.com/gobuffalo/packr/v2/go.mod | 19 +
vendor/github.com/gobuffalo/packr/v2/go.sum | 180 +
.../github.com/gobuffalo/packr/v2/helpers.go | 72 +
.../gobuffalo/packr/v2/internal/envy.go | 37 +
.../github.com/gobuffalo/packr/v2/jam/pack.go | 113 +
.../gobuffalo/packr/v2/jam/parser/args.go | 52 +
.../gobuffalo/packr/v2/jam/parser/box.go | 40 +
.../gobuffalo/packr/v2/jam/parser/file.go | 54 +
.../gobuffalo/packr/v2/jam/parser/finder.go | 112 +
.../gobuffalo/packr/v2/jam/parser/gogen.go | 43 +
.../gobuffalo/packr/v2/jam/parser/parser.go | 46 +
.../gobuffalo/packr/v2/jam/parser/prospect.go | 77 +
.../gobuffalo/packr/v2/jam/parser/roots.go | 89 +
.../gobuffalo/packr/v2/jam/parser/visitor.go | 324 +
.../gobuffalo/packr/v2/jam/store/clean.go | 75 +
.../gobuffalo/packr/v2/jam/store/disk.go | 348 +
.../gobuffalo/packr/v2/jam/store/disk_tmpl.go | 51 +
.../gobuffalo/packr/v2/jam/store/env.go | 39 +
.../gobuffalo/packr/v2/jam/store/fn.go | 44 +
.../gobuffalo/packr/v2/jam/store/legacy.go | 129 +
.../gobuffalo/packr/v2/jam/store/store.go | 12 +
vendor/github.com/gobuffalo/packr/v2/packr.go | 56 +
.../gobuffalo/packr/v2/packr2/LICENSE | 21 +
.../gobuffalo/packr/v2/packr2/cmd/build.go | 28 +
.../gobuffalo/packr/v2/packr2/cmd/clean.go | 18 +
.../gobuffalo/packr/v2/packr2/cmd/fix.go | 23 +
.../gobuffalo/packr/v2/packr2/cmd/fix/fix.go | 49 +
.../packr/v2/packr2/cmd/fix/imports.go | 251 +
.../packr/v2/packr2/cmd/fix/runner.go | 47 +
.../gobuffalo/packr/v2/packr2/cmd/gocmd.go | 67 +
.../gobuffalo/packr/v2/packr2/cmd/install.go | 43 +
.../gobuffalo/packr/v2/packr2/cmd/pack.go | 25 +
.../gobuffalo/packr/v2/packr2/cmd/root.go | 82 +
.../gobuffalo/packr/v2/packr2/cmd/version.go | 21 +
.../gobuffalo/packr/v2/packr2/main.go | 7 +
.../gobuffalo/packr/v2/plog/plog.go | 41 +
.../github.com/gobuffalo/packr/v2/pointer.go | 32 +
.../gobuffalo/packr/v2/resolvers_map.go | 75 +
.../github.com/gobuffalo/packr/v2/version.go | 4 +
vendor/github.com/gobuffalo/packr/v2/walk.go | 80 +
vendor/github.com/gobwas/glob/.gitignore | 8 +
vendor/github.com/gobwas/glob/.travis.yml | 9 +
vendor/github.com/gobwas/glob/LICENSE | 21 +
vendor/github.com/gobwas/glob/bench.sh | 26 +
.../gobwas/glob/compiler/compiler.go | 525 +
vendor/github.com/gobwas/glob/glob.go | 80 +
vendor/github.com/gobwas/glob/match/any.go | 45 +
vendor/github.com/gobwas/glob/match/any_of.go | 82 +
vendor/github.com/gobwas/glob/match/btree.go | 146 +
.../github.com/gobwas/glob/match/contains.go | 58 +
.../github.com/gobwas/glob/match/every_of.go | 99 +
vendor/github.com/gobwas/glob/match/list.go | 49 +
vendor/github.com/gobwas/glob/match/match.go | 81 +
vendor/github.com/gobwas/glob/match/max.go | 49 +
vendor/github.com/gobwas/glob/match/min.go | 57 +
.../github.com/gobwas/glob/match/nothing.go | 27 +
vendor/github.com/gobwas/glob/match/prefix.go | 50 +
.../gobwas/glob/match/prefix_any.go | 55 +
.../gobwas/glob/match/prefix_suffix.go | 62 +
vendor/github.com/gobwas/glob/match/range.go | 48 +
vendor/github.com/gobwas/glob/match/row.go | 77 +
.../github.com/gobwas/glob/match/segments.go | 91 +
vendor/github.com/gobwas/glob/match/single.go | 43 +
vendor/github.com/gobwas/glob/match/suffix.go | 35 +
.../gobwas/glob/match/suffix_any.go | 43 +
vendor/github.com/gobwas/glob/match/super.go | 33 +
vendor/github.com/gobwas/glob/match/text.go | 45 +
vendor/github.com/gobwas/glob/readme.md | 148 +
.../github.com/gobwas/glob/syntax/ast/ast.go | 122 +
.../gobwas/glob/syntax/ast/parser.go | 157 +
.../gobwas/glob/syntax/lexer/lexer.go | 273 +
.../gobwas/glob/syntax/lexer/token.go | 88 +
.../github.com/gobwas/glob/syntax/syntax.go | 14 +
.../gobwas/glob/util/runes/runes.go | 154 +
.../gobwas/glob/util/strings/strings.go | 39 +
vendor/github.com/gogo/protobuf/AUTHORS | 15 +
vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 +
vendor/github.com/gogo/protobuf/LICENSE | 35 +
.../gogo/protobuf/gogoproto/Makefile | 37 +
.../github.com/gogo/protobuf/gogoproto/doc.go | 169 +
.../gogo/protobuf/gogoproto/gogo.pb.go | 874 +
.../gogo/protobuf/gogoproto/gogo.pb.golden | 45 +
.../gogo/protobuf/gogoproto/gogo.proto | 144 +
.../gogo/protobuf/gogoproto/helper.go | 415 +
.../gogo/protobuf/plugin/compare/compare.go | 580 +
.../protobuf/plugin/compare/comparetest.go | 118 +
.../plugin/defaultcheck/defaultcheck.go | 133 +
.../plugin/description/description.go | 201 +
.../plugin/description/descriptiontest.go | 73 +
.../protobuf/plugin/embedcheck/embedcheck.go | 200 +
.../plugin/enumstringer/enumstringer.go | 104 +
.../gogo/protobuf/plugin/equal/equal.go | 694 +
.../gogo/protobuf/plugin/equal/equaltest.go | 109 +
.../gogo/protobuf/plugin/face/face.go | 233 +
.../gogo/protobuf/plugin/face/facetest.go | 82 +
.../gogo/protobuf/plugin/gostring/gostring.go | 386 +
.../protobuf/plugin/gostring/gostringtest.go | 90 +
.../protobuf/plugin/marshalto/marshalto.go | 1140 +
.../protobuf/plugin/oneofcheck/oneofcheck.go | 93 +
.../gogo/protobuf/plugin/populate/populate.go | 815 +
.../gogo/protobuf/plugin/size/size.go | 696 +
.../gogo/protobuf/plugin/size/sizetest.go | 134 +
.../gogo/protobuf/plugin/stringer/stringer.go | 347 +
.../protobuf/plugin/stringer/stringertest.go | 83 +
.../gogo/protobuf/plugin/testgen/testgen.go | 608 +
.../gogo/protobuf/plugin/union/union.go | 209 +
.../gogo/protobuf/plugin/union/uniontest.go | 86 +
.../protobuf/plugin/unmarshal/unmarshal.go | 1667 +
.../github.com/gogo/protobuf/proto/Makefile | 43 +
.../github.com/gogo/protobuf/proto/clone.go | 258 +
.../gogo/protobuf/proto/custom_gogo.go | 39 +
.../github.com/gogo/protobuf/proto/decode.go | 427 +
.../gogo/protobuf/proto/deprecated.go | 63 +
.../github.com/gogo/protobuf/proto/discard.go | 350 +
.../gogo/protobuf/proto/duration.go | 100 +
.../gogo/protobuf/proto/duration_gogo.go | 49 +
.../github.com/gogo/protobuf/proto/encode.go | 205 +
.../gogo/protobuf/proto/encode_gogo.go | 33 +
.../github.com/gogo/protobuf/proto/equal.go | 300 +
.../gogo/protobuf/proto/extensions.go | 605 +
.../gogo/protobuf/proto/extensions_gogo.go | 389 +
vendor/github.com/gogo/protobuf/proto/lib.go | 973 +
.../gogo/protobuf/proto/lib_gogo.go | 50 +
.../gogo/protobuf/proto/message_set.go | 181 +
.../gogo/protobuf/proto/pointer_reflect.go | 357 +
.../protobuf/proto/pointer_reflect_gogo.go | 59 +
.../gogo/protobuf/proto/pointer_unsafe.go | 308 +
.../protobuf/proto/pointer_unsafe_gogo.go | 56 +
.../gogo/protobuf/proto/properties.go | 610 +
.../gogo/protobuf/proto/properties_gogo.go | 36 +
.../gogo/protobuf/proto/skip_gogo.go | 119 +
.../gogo/protobuf/proto/table_marshal.go | 3009 +
.../gogo/protobuf/proto/table_marshal_gogo.go | 388 +
.../gogo/protobuf/proto/table_merge.go | 676 +
.../gogo/protobuf/proto/table_unmarshal.go | 2249 +
.../protobuf/proto/table_unmarshal_gogo.go | 385 +
vendor/github.com/gogo/protobuf/proto/text.go | 930 +
.../gogo/protobuf/proto/text_gogo.go | 57 +
.../gogo/protobuf/proto/text_parser.go | 1018 +
.../gogo/protobuf/proto/timestamp.go | 113 +
.../gogo/protobuf/proto/timestamp_gogo.go | 49 +
.../gogo/protobuf/proto/wrappers.go | 1888 +
.../gogo/protobuf/proto/wrappers_gogo.go | 113 +
.../protoc-gen-gogo/descriptor/Makefile | 36 +
.../protoc-gen-gogo/descriptor/descriptor.go | 118 +
.../descriptor/descriptor.pb.go | 2865 +
.../descriptor/descriptor_gostring.gen.go | 752 +
.../protoc-gen-gogo/descriptor/helper.go | 390 +
.../protoc-gen-gogo/generator/generator.go | 3444 +
.../protoc-gen-gogo/generator/helper.go | 461 +
.../generator/internal/remap/remap.go | 117 +
.../protobuf/protoc-gen-gogo/grpc/grpc.go | 536 +
.../protobuf/protoc-gen-gogo/plugin/Makefile | 37 +
.../protoc-gen-gogo/plugin/plugin.pb.go | 365 +
.../gogo/protobuf/sortkeys/sortkeys.go | 101 +
.../gogo/protobuf/vanity/command/command.go | 161 +
.../github.com/gogo/protobuf/vanity/enum.go | 78 +
.../github.com/gogo/protobuf/vanity/field.go | 90 +
.../github.com/gogo/protobuf/vanity/file.go | 197 +
.../gogo/protobuf/vanity/foreach.go | 125 +
vendor/github.com/gogo/protobuf/vanity/msg.go | 154 +
vendor/github.com/golang/groupcache/LICENSE | 191 +
.../github.com/golang/groupcache/lru/lru.go | 133 +
vendor/github.com/golang/mock/AUTHORS | 12 +
vendor/github.com/golang/mock/CONTRIBUTORS | 37 +
vendor/github.com/golang/mock/LICENSE | 202 +
vendor/github.com/golang/mock/gomock/call.go | 433 +
.../github.com/golang/mock/gomock/callset.go | 108 +
.../golang/mock/gomock/controller.go | 328 +
.../github.com/golang/mock/gomock/matchers.go | 255 +
.../github.com/golang/mock/mockgen/mockgen.go | 662 +
.../golang/mock/mockgen/model/model.go | 486 +
.../github.com/golang/mock/mockgen/parse.go | 666 +
.../github.com/golang/mock/mockgen/reflect.go | 249 +
.../golang/mock/mockgen/version.1.11.go | 26 +
.../golang/mock/mockgen/version.1.12.go | 35 +
vendor/github.com/golang/protobuf/AUTHORS | 3 +
.../github.com/golang/protobuf/CONTRIBUTORS | 3 +
vendor/github.com/golang/protobuf/LICENSE | 28 +
.../golang/protobuf/proto/buffer.go | 324 +
.../golang/protobuf/proto/defaults.go | 63 +
.../golang/protobuf/proto/deprecated.go | 113 +
.../golang/protobuf/proto/discard.go | 58 +
.../golang/protobuf/proto/extensions.go | 356 +
.../golang/protobuf/proto/properties.go | 306 +
.../github.com/golang/protobuf/proto/proto.go | 167 +
.../golang/protobuf/proto/registry.go | 323 +
.../golang/protobuf/proto/text_decode.go | 801 +
.../golang/protobuf/proto/text_encode.go | 560 +
.../github.com/golang/protobuf/proto/wire.go | 78 +
.../golang/protobuf/proto/wrappers.go | 34 +
.../github.com/golang/protobuf/ptypes/any.go | 165 +
.../golang/protobuf/ptypes/any/any.pb.go | 62 +
.../github.com/golang/protobuf/ptypes/doc.go | 6 +
.../golang/protobuf/ptypes/duration.go | 72 +
.../protobuf/ptypes/duration/duration.pb.go | 63 +
.../golang/protobuf/ptypes/timestamp.go | 103 +
.../protobuf/ptypes/timestamp/timestamp.pb.go | 64 +
vendor/github.com/google/go-cmp/LICENSE | 27 +
.../github.com/google/go-cmp/cmp/compare.go | 682 +
.../google/go-cmp/cmp/export_panic.go | 15 +
.../google/go-cmp/cmp/export_unsafe.go | 35 +
.../go-cmp/cmp/internal/diff/debug_disable.go | 17 +
.../go-cmp/cmp/internal/diff/debug_enable.go | 122 +
.../google/go-cmp/cmp/internal/diff/diff.go | 392 +
.../google/go-cmp/cmp/internal/flags/flags.go | 9 +
.../cmp/internal/flags/toolchain_legacy.go | 10 +
.../cmp/internal/flags/toolchain_recent.go | 10 +
.../go-cmp/cmp/internal/function/func.go | 99 +
.../google/go-cmp/cmp/internal/value/name.go | 157 +
.../cmp/internal/value/pointer_purego.go | 33 +
.../cmp/internal/value/pointer_unsafe.go | 36 +
.../google/go-cmp/cmp/internal/value/sort.go | 106 +
.../google/go-cmp/cmp/internal/value/zero.go | 48 +
.../github.com/google/go-cmp/cmp/options.go | 552 +
vendor/github.com/google/go-cmp/cmp/path.go | 378 +
vendor/github.com/google/go-cmp/cmp/report.go | 54 +
.../google/go-cmp/cmp/report_compare.go | 432 +
.../google/go-cmp/cmp/report_references.go | 264 +
.../google/go-cmp/cmp/report_reflect.go | 400 +
.../google/go-cmp/cmp/report_slices.go | 448 +
.../google/go-cmp/cmp/report_text.go | 431 +
.../google/go-cmp/cmp/report_value.go | 121 +
vendor/github.com/google/gofuzz/.travis.yml | 13 +
.../github.com/google/gofuzz/CONTRIBUTING.md | 67 +
vendor/github.com/google/gofuzz/LICENSE | 202 +
vendor/github.com/google/gofuzz/README.md | 71 +
vendor/github.com/google/gofuzz/doc.go | 18 +
vendor/github.com/google/gofuzz/fuzz.go | 506 +
vendor/github.com/google/gofuzz/go.mod | 3 +
vendor/github.com/google/uuid/.travis.yml | 9 +
vendor/github.com/google/uuid/CONTRIBUTING.md | 10 +
vendor/github.com/google/uuid/CONTRIBUTORS | 9 +
vendor/github.com/google/uuid/LICENSE | 27 +
vendor/github.com/google/uuid/README.md | 19 +
vendor/github.com/google/uuid/dce.go | 80 +
vendor/github.com/google/uuid/doc.go | 12 +
vendor/github.com/google/uuid/go.mod | 1 +
vendor/github.com/google/uuid/hash.go | 53 +
vendor/github.com/google/uuid/marshal.go | 37 +
vendor/github.com/google/uuid/node.go | 90 +
vendor/github.com/google/uuid/node_js.go | 12 +
vendor/github.com/google/uuid/node_net.go | 33 +
vendor/github.com/google/uuid/sql.go | 59 +
vendor/github.com/google/uuid/time.go | 123 +
vendor/github.com/google/uuid/util.go | 43 +
vendor/github.com/google/uuid/uuid.go | 245 +
vendor/github.com/google/uuid/version1.go | 44 +
vendor/github.com/google/uuid/version4.go | 38 +
vendor/github.com/googleapis/gnostic/LICENSE | 203 +
.../googleapis/gnostic/compiler/README.md | 4 +
.../googleapis/gnostic/compiler/context.go | 43 +
.../googleapis/gnostic/compiler/error.go | 61 +
.../googleapis/gnostic/compiler/extensions.go | 85 +
.../googleapis/gnostic/compiler/helpers.go | 385 +
.../googleapis/gnostic/compiler/main.go | 16 +
.../googleapis/gnostic/compiler/reader.go | 307 +
.../googleapis/gnostic/extensions/README.md | 13 +
.../gnostic/extensions/extension.pb.go | 465 +
.../gnostic/extensions/extension.proto | 96 +
.../gnostic/extensions/extensions.go | 64 +
.../googleapis/gnostic/jsonschema/README.md | 4 +
.../googleapis/gnostic/jsonschema/base.go | 84 +
.../googleapis/gnostic/jsonschema/display.go | 229 +
.../googleapis/gnostic/jsonschema/models.go | 228 +
.../gnostic/jsonschema/operations.go | 394 +
.../googleapis/gnostic/jsonschema/reader.go | 442 +
.../googleapis/gnostic/jsonschema/schema.json | 150 +
.../googleapis/gnostic/jsonschema/writer.go | 369 +
.../googleapis/gnostic/openapiv2/OpenAPIv2.go | 8789 ++
.../gnostic/openapiv2/OpenAPIv2.pb.go | 7347 ++
.../gnostic/openapiv2/OpenAPIv2.proto | 666 +
.../googleapis/gnostic/openapiv2/README.md | 14 +
.../googleapis/gnostic/openapiv2/document.go | 26 +
.../gnostic/openapiv2/openapi-2.0.json | 1610 +
vendor/github.com/hashicorp/errwrap/LICENSE | 354 +
vendor/github.com/hashicorp/errwrap/README.md | 89 +
.../github.com/hashicorp/errwrap/errwrap.go | 169 +
vendor/github.com/hashicorp/errwrap/go.mod | 1 +
.../hashicorp/go-multierror/.travis.yml | 12 +
.../hashicorp/go-multierror/LICENSE | 353 +
.../hashicorp/go-multierror/Makefile | 31 +
.../hashicorp/go-multierror/README.md | 97 +
.../hashicorp/go-multierror/append.go | 41 +
.../hashicorp/go-multierror/flatten.go | 26 +
.../hashicorp/go-multierror/format.go | 27 +
.../github.com/hashicorp/go-multierror/go.mod | 3 +
.../github.com/hashicorp/go-multierror/go.sum | 4 +
.../hashicorp/go-multierror/multierror.go | 51 +
.../hashicorp/go-multierror/prefix.go | 37 +
.../hashicorp/go-multierror/sort.go | 16 +
.../hashicorp/golang-lru/.gitignore | 23 +
vendor/github.com/hashicorp/golang-lru/2q.go | 223 +
.../github.com/hashicorp/golang-lru/LICENSE | 362 +
.../github.com/hashicorp/golang-lru/README.md | 25 +
vendor/github.com/hashicorp/golang-lru/arc.go | 257 +
vendor/github.com/hashicorp/golang-lru/doc.go | 21 +
vendor/github.com/hashicorp/golang-lru/go.mod | 3 +
vendor/github.com/hashicorp/golang-lru/lru.go | 150 +
.../hashicorp/golang-lru/simplelru/lru.go | 177 +
.../golang-lru/simplelru/lru_interface.go | 39 +
vendor/github.com/huandu/xstrings/.gitignore | 24 +
vendor/github.com/huandu/xstrings/.travis.yml | 7 +
.../huandu/xstrings/CONTRIBUTING.md | 23 +
vendor/github.com/huandu/xstrings/LICENSE | 22 +
vendor/github.com/huandu/xstrings/README.md | 117 +
vendor/github.com/huandu/xstrings/common.go | 25 +
vendor/github.com/huandu/xstrings/convert.go | 591 +
vendor/github.com/huandu/xstrings/count.go | 120 +
vendor/github.com/huandu/xstrings/doc.go | 8 +
vendor/github.com/huandu/xstrings/format.go | 170 +
vendor/github.com/huandu/xstrings/go.mod | 3 +
.../github.com/huandu/xstrings/manipulate.go | 217 +
.../github.com/huandu/xstrings/translate.go | 547 +
.../github.com/imdario/mergo/.deepsource.toml | 12 +
vendor/github.com/imdario/mergo/.gitignore | 33 +
vendor/github.com/imdario/mergo/.travis.yml | 9 +
.../imdario/mergo/CODE_OF_CONDUCT.md | 46 +
vendor/github.com/imdario/mergo/LICENSE | 28 +
vendor/github.com/imdario/mergo/README.md | 240 +
vendor/github.com/imdario/mergo/doc.go | 143 +
vendor/github.com/imdario/mergo/go.mod | 5 +
vendor/github.com/imdario/mergo/go.sum | 4 +
vendor/github.com/imdario/mergo/map.go | 178 +
vendor/github.com/imdario/mergo/merge.go | 380 +
vendor/github.com/imdario/mergo/mergo.go | 78 +
.../inconshreveable/mousetrap/LICENSE | 13 +
.../inconshreveable/mousetrap/README.md | 23 +
.../inconshreveable/mousetrap/trap_others.go | 15 +
.../inconshreveable/mousetrap/trap_windows.go | 98 +
.../mousetrap/trap_windows_1.4.go | 46 +
.../github.com/json-iterator/go/.codecov.yml | 3 +
vendor/github.com/json-iterator/go/.gitignore | 4 +
.../github.com/json-iterator/go/.travis.yml | 14 +
vendor/github.com/json-iterator/go/Gopkg.lock | 21 +
vendor/github.com/json-iterator/go/Gopkg.toml | 26 +
vendor/github.com/json-iterator/go/LICENSE | 21 +
vendor/github.com/json-iterator/go/README.md | 87 +
vendor/github.com/json-iterator/go/adapter.go | 150 +
vendor/github.com/json-iterator/go/any.go | 325 +
.../github.com/json-iterator/go/any_array.go | 278 +
.../github.com/json-iterator/go/any_bool.go | 137 +
.../github.com/json-iterator/go/any_float.go | 83 +
.../github.com/json-iterator/go/any_int32.go | 74 +
.../github.com/json-iterator/go/any_int64.go | 74 +
.../json-iterator/go/any_invalid.go | 82 +
vendor/github.com/json-iterator/go/any_nil.go | 69 +
.../github.com/json-iterator/go/any_number.go | 123 +
.../github.com/json-iterator/go/any_object.go | 374 +
vendor/github.com/json-iterator/go/any_str.go | 166 +
.../github.com/json-iterator/go/any_uint32.go | 74 +
.../github.com/json-iterator/go/any_uint64.go | 74 +
vendor/github.com/json-iterator/go/build.sh | 12 +
vendor/github.com/json-iterator/go/config.go | 375 +
.../go/fuzzy_mode_convert_table.md | 7 +
vendor/github.com/json-iterator/go/go.mod | 11 +
vendor/github.com/json-iterator/go/go.sum | 14 +
vendor/github.com/json-iterator/go/iter.go | 349 +
.../github.com/json-iterator/go/iter_array.go | 64 +
.../github.com/json-iterator/go/iter_float.go | 339 +
.../github.com/json-iterator/go/iter_int.go | 345 +
.../json-iterator/go/iter_object.go | 267 +
.../github.com/json-iterator/go/iter_skip.go | 130 +
.../json-iterator/go/iter_skip_sloppy.go | 163 +
.../json-iterator/go/iter_skip_strict.go | 99 +
.../github.com/json-iterator/go/iter_str.go | 215 +
.../github.com/json-iterator/go/jsoniter.go | 18 +
vendor/github.com/json-iterator/go/pool.go | 42 +
vendor/github.com/json-iterator/go/reflect.go | 337 +
.../json-iterator/go/reflect_array.go | 104 +
.../json-iterator/go/reflect_dynamic.go | 70 +
.../json-iterator/go/reflect_extension.go | 483 +
.../json-iterator/go/reflect_json_number.go | 112 +
.../go/reflect_json_raw_message.go | 60 +
.../json-iterator/go/reflect_map.go | 346 +
.../json-iterator/go/reflect_marshaler.go | 225 +
.../json-iterator/go/reflect_native.go | 453 +
.../json-iterator/go/reflect_optional.go | 129 +
.../json-iterator/go/reflect_slice.go | 99 +
.../go/reflect_struct_decoder.go | 1092 +
.../go/reflect_struct_encoder.go | 211 +
vendor/github.com/json-iterator/go/stream.go | 210 +
.../json-iterator/go/stream_float.go | 111 +
.../github.com/json-iterator/go/stream_int.go | 190 +
.../github.com/json-iterator/go/stream_str.go | 372 +
vendor/github.com/json-iterator/go/test.sh | 12 +
.../github.com/karrick/godirwalk/.gitignore | 19 +
vendor/github.com/karrick/godirwalk/LICENSE | 25 +
vendor/github.com/karrick/godirwalk/README.md | 320 +
.../karrick/godirwalk/azure-pipelines.yml | 53 +
vendor/github.com/karrick/godirwalk/bench.sh | 7 +
.../karrick/godirwalk/debug_development.go | 14 +
.../karrick/godirwalk/debug_release.go | 6 +
vendor/github.com/karrick/godirwalk/dirent.go | 104 +
vendor/github.com/karrick/godirwalk/doc.go | 42 +
vendor/github.com/karrick/godirwalk/go.mod | 3 +
vendor/github.com/karrick/godirwalk/go.sum | 0
.../karrick/godirwalk/inoWithFileno.go | 9 +
.../karrick/godirwalk/inoWithIno.go | 9 +
.../github.com/karrick/godirwalk/modeType.go | 22 +
.../karrick/godirwalk/modeTypeWithType.go | 37 +
.../karrick/godirwalk/modeTypeWithoutType.go | 18 +
.../karrick/godirwalk/nameWithNamlen.go | 29 +
.../karrick/godirwalk/nameWithoutNamlen.go | 42 +
.../github.com/karrick/godirwalk/readdir.go | 53 +
.../karrick/godirwalk/readdir_unix.go | 131 +
.../karrick/godirwalk/readdir_windows.go | 66 +
.../karrick/godirwalk/reclenFromNamlen.go | 9 +
.../karrick/godirwalk/reclenFromReclen.go | 9 +
.../karrick/godirwalk/scandir_unix.go | 166 +
.../karrick/godirwalk/scandir_windows.go | 133 +
.../github.com/karrick/godirwalk/scanner.go | 44 +
vendor/github.com/karrick/godirwalk/walk.go | 379 +
vendor/github.com/mailru/easyjson/LICENSE | 7 +
.../github.com/mailru/easyjson/buffer/pool.go | 270 +
.../mailru/easyjson/jlexer/bytestostr.go | 24 +
.../easyjson/jlexer/bytestostr_nounsafe.go | 13 +
.../mailru/easyjson/jlexer/error.go | 15 +
.../mailru/easyjson/jlexer/lexer.go | 1182 +
.../mailru/easyjson/jwriter/writer.go | 390 +
vendor/github.com/markbates/errx/.gitignore | 29 +
vendor/github.com/markbates/errx/LICENSE | 21 +
vendor/github.com/markbates/errx/Makefile | 61 +
vendor/github.com/markbates/errx/SHOULDERS.md | 6 +
.../markbates/errx/azure-pipelines.yml | 71 +
.../github.com/markbates/errx/azure-tests.yml | 19 +
vendor/github.com/markbates/errx/errx.go | 49 +
vendor/github.com/markbates/errx/go.mod | 3 +
vendor/github.com/markbates/errx/go.sum | 0
vendor/github.com/markbates/errx/version.go | 4 +
vendor/github.com/markbates/oncer/.gitignore | 29 +
vendor/github.com/markbates/oncer/LICENSE | 21 +
vendor/github.com/markbates/oncer/Makefile | 61 +
.../github.com/markbates/oncer/SHOULDERS.md | 10 +
.../markbates/oncer/azure-pipelines.yml | 65 +
.../markbates/oncer/azure-tests.yml | 19 +
.../github.com/markbates/oncer/deprecate.go | 20 +
vendor/github.com/markbates/oncer/go.mod | 8 +
vendor/github.com/markbates/oncer/go.sum | 9 +
vendor/github.com/markbates/oncer/log.go | 7 +
.../github.com/markbates/oncer/log_debug.go | 19 +
vendor/github.com/markbates/oncer/oncer.go | 26 +
vendor/github.com/markbates/oncer/version.go | 4 +
vendor/github.com/markbates/safe/.gitignore | 29 +
.../markbates/safe/.gometalinter.json | 3 +
vendor/github.com/markbates/safe/.travis.yml | 26 +
vendor/github.com/markbates/safe/LICENSE | 21 +
vendor/github.com/markbates/safe/Makefile | 55 +
vendor/github.com/markbates/safe/go.mod | 7 +
vendor/github.com/markbates/safe/go.sum | 6 +
vendor/github.com/markbates/safe/safe.go | 33 +
vendor/github.com/markbates/safe/shoulders.md | 8 +
vendor/github.com/markbates/safe/version.go | 3 +
.../github.com/mattn/go-colorable/.travis.yml | 9 +
vendor/github.com/mattn/go-colorable/LICENSE | 21 +
.../github.com/mattn/go-colorable/README.md | 48 +
.../mattn/go-colorable/colorable_appengine.go | 29 +
.../mattn/go-colorable/colorable_others.go | 30 +
.../mattn/go-colorable/colorable_windows.go | 980 +
vendor/github.com/mattn/go-colorable/go.mod | 3 +
vendor/github.com/mattn/go-colorable/go.sum | 4 +
.../mattn/go-colorable/noncolorable.go | 55 +
vendor/github.com/mattn/go-isatty/.travis.yml | 14 +
vendor/github.com/mattn/go-isatty/LICENSE | 9 +
vendor/github.com/mattn/go-isatty/README.md | 50 +
vendor/github.com/mattn/go-isatty/doc.go | 2 +
vendor/github.com/mattn/go-isatty/go.mod | 5 +
vendor/github.com/mattn/go-isatty/go.sum | 2 +
vendor/github.com/mattn/go-isatty/go.test.sh | 12 +
.../github.com/mattn/go-isatty/isatty_bsd.go | 18 +
.../mattn/go-isatty/isatty_others.go | 15 +
.../mattn/go-isatty/isatty_plan9.go | 22 +
.../mattn/go-isatty/isatty_solaris.go | 22 +
.../mattn/go-isatty/isatty_tcgets.go | 18 +
.../mattn/go-isatty/isatty_windows.go | 125 +
.../github.com/mattn/go-isatty/renovate.json | 8 +
.../golang_protobuf_extensions/LICENSE | 201 +
.../golang_protobuf_extensions/NOTICE | 1 +
.../pbutil/.gitignore | 1 +
.../pbutil/Makefile | 7 +
.../pbutil/decode.go | 75 +
.../golang_protobuf_extensions/pbutil/doc.go | 16 +
.../pbutil/encode.go | 46 +
.../mitchellh/copystructure/.travis.yml | 12 +
.../mitchellh/copystructure/LICENSE | 21 +
.../mitchellh/copystructure/README.md | 21 +
.../mitchellh/copystructure/copier_time.go | 15 +
.../mitchellh/copystructure/copystructure.go | 548 +
.../github.com/mitchellh/copystructure/go.mod | 3 +
.../github.com/mitchellh/copystructure/go.sum | 2 +
.../mitchellh/reflectwalk/.travis.yml | 1 +
.../github.com/mitchellh/reflectwalk/LICENSE | 21 +
.../mitchellh/reflectwalk/README.md | 6 +
.../github.com/mitchellh/reflectwalk/go.mod | 1 +
.../mitchellh/reflectwalk/location.go | 19 +
.../mitchellh/reflectwalk/location_string.go | 16 +
.../mitchellh/reflectwalk/reflectwalk.go | 402 +
.../modern-go/concurrent/.gitignore | 1 +
.../modern-go/concurrent/.travis.yml | 14 +
.../github.com/modern-go/concurrent/LICENSE | 201 +
.../github.com/modern-go/concurrent/README.md | 49 +
.../modern-go/concurrent/executor.go | 14 +
.../modern-go/concurrent/go_above_19.go | 15 +
.../modern-go/concurrent/go_below_19.go | 33 +
vendor/github.com/modern-go/concurrent/log.go | 13 +
.../github.com/modern-go/concurrent/test.sh | 12 +
.../concurrent/unbounded_executor.go | 119 +
.../github.com/modern-go/reflect2/.gitignore | 2 +
.../github.com/modern-go/reflect2/.travis.yml | 15 +
.../github.com/modern-go/reflect2/Gopkg.lock | 15 +
.../github.com/modern-go/reflect2/Gopkg.toml | 35 +
vendor/github.com/modern-go/reflect2/LICENSE | 201 +
.../github.com/modern-go/reflect2/README.md | 71 +
.../modern-go/reflect2/go_above_17.go | 8 +
.../modern-go/reflect2/go_above_19.go | 14 +
.../modern-go/reflect2/go_below_17.go | 9 +
.../modern-go/reflect2/go_below_19.go | 14 +
.../github.com/modern-go/reflect2/reflect2.go | 298 +
.../modern-go/reflect2/reflect2_amd64.s | 0
.../modern-go/reflect2/reflect2_kind.go | 30 +
.../modern-go/reflect2/relfect2_386.s | 0
.../modern-go/reflect2/relfect2_amd64p32.s | 0
.../modern-go/reflect2/relfect2_arm.s | 0
.../modern-go/reflect2/relfect2_arm64.s | 0
.../modern-go/reflect2/relfect2_mips64x.s | 0
.../modern-go/reflect2/relfect2_mipsx.s | 0
.../modern-go/reflect2/relfect2_ppc64x.s | 0
.../modern-go/reflect2/relfect2_s390x.s | 0
.../modern-go/reflect2/safe_field.go | 58 +
.../github.com/modern-go/reflect2/safe_map.go | 101 +
.../modern-go/reflect2/safe_slice.go | 92 +
.../modern-go/reflect2/safe_struct.go | 29 +
.../modern-go/reflect2/safe_type.go | 78 +
vendor/github.com/modern-go/reflect2/test.sh | 12 +
.../github.com/modern-go/reflect2/type_map.go | 113 +
.../modern-go/reflect2/unsafe_array.go | 65 +
.../modern-go/reflect2/unsafe_eface.go | 59 +
.../modern-go/reflect2/unsafe_field.go | 74 +
.../modern-go/reflect2/unsafe_iface.go | 64 +
.../modern-go/reflect2/unsafe_link.go | 70 +
.../modern-go/reflect2/unsafe_map.go | 138 +
.../modern-go/reflect2/unsafe_ptr.go | 46 +
.../modern-go/reflect2/unsafe_slice.go | 177 +
.../modern-go/reflect2/unsafe_struct.go | 59 +
.../modern-go/reflect2/unsafe_type.go | 85 +
vendor/github.com/nxadm/tail/.gitignore | 2 +
vendor/github.com/nxadm/tail/.travis.yml | 16 +
vendor/github.com/nxadm/tail/CHANGES.md | 46 +
vendor/github.com/nxadm/tail/Dockerfile | 19 +
vendor/github.com/nxadm/tail/LICENSE | 21 +
vendor/github.com/nxadm/tail/README.md | 36 +
vendor/github.com/nxadm/tail/appveyor.yml | 11 +
vendor/github.com/nxadm/tail/go.mod | 9 +
vendor/github.com/nxadm/tail/go.sum | 6 +
.../github.com/nxadm/tail/ratelimiter/Licence | 7 +
.../nxadm/tail/ratelimiter/leakybucket.go | 97 +
.../nxadm/tail/ratelimiter/memory.go | 60 +
.../nxadm/tail/ratelimiter/storage.go | 6 +
vendor/github.com/nxadm/tail/tail.go | 440 +
vendor/github.com/nxadm/tail/tail_posix.go | 11 +
vendor/github.com/nxadm/tail/tail_windows.go | 12 +
vendor/github.com/nxadm/tail/util/util.go | 48 +
.../nxadm/tail/watch/filechanges.go | 36 +
vendor/github.com/nxadm/tail/watch/inotify.go | 135 +
.../nxadm/tail/watch/inotify_tracker.go | 248 +
vendor/github.com/nxadm/tail/watch/polling.go | 118 +
vendor/github.com/nxadm/tail/watch/watch.go | 20 +
.../github.com/nxadm/tail/winfile/winfile.go | 92 +
vendor/github.com/onsi/ginkgo/LICENSE | 20 +
.../github.com/onsi/ginkgo/config/config.go | 213 +
.../onsi/ginkgo/ginkgo/bootstrap_command.go | 199 +
.../onsi/ginkgo/ginkgo/build_command.go | 66 +
.../ginkgo/ginkgo/convert/ginkgo_ast_nodes.go | 123 +
.../onsi/ginkgo/ginkgo/convert/import.go | 90 +
.../ginkgo/ginkgo/convert/package_rewriter.go | 128 +
.../onsi/ginkgo/ginkgo/convert/test_finder.go | 56 +
.../ginkgo/convert/testfile_rewriter.go | 162 +
.../ginkgo/convert/testing_t_rewriter.go | 130 +
.../onsi/ginkgo/ginkgo/convert_command.go | 45 +
.../onsi/ginkgo/ginkgo/generate_command.go | 254 +
.../onsi/ginkgo/ginkgo/help_command.go | 31 +
.../interrupthandler/interrupt_handler.go | 52 +
.../sigquit_swallower_unix.go | 14 +
.../sigquit_swallower_windows.go | 7 +
vendor/github.com/onsi/ginkgo/ginkgo/main.go | 302 +
.../onsi/ginkgo/ginkgo/nodot/nodot.go | 196 +
.../onsi/ginkgo/ginkgo/nodot_command.go | 77 +
.../onsi/ginkgo/ginkgo/notifications.go | 141 +
.../onsi/ginkgo/ginkgo/run_command.go | 291 +
.../run_watch_and_build_command_flags.go | 169 +
.../onsi/ginkgo/ginkgo/suite_runner.go | 173 +
.../ginkgo/ginkgo/testrunner/build_args.go | 7 +
.../ginkgo/testrunner/build_args_old.go | 7 +
.../ginkgo/ginkgo/testrunner/log_writer.go | 52 +
.../ginkgo/ginkgo/testrunner/run_result.go | 27 +
.../ginkgo/ginkgo/testrunner/test_runner.go | 554 +
.../ginkgo/ginkgo/testsuite/test_suite.go | 115 +
.../ginkgo/testsuite/vendor_check_go15.go | 16 +
.../ginkgo/testsuite/vendor_check_go16.go | 15 +
.../onsi/ginkgo/ginkgo/unfocus_command.go | 180 +
.../onsi/ginkgo/ginkgo/version_command.go | 24 +
.../onsi/ginkgo/ginkgo/watch/delta.go | 22 +
.../onsi/ginkgo/ginkgo/watch/delta_tracker.go | 75 +
.../onsi/ginkgo/ginkgo/watch/dependencies.go | 92 +
.../onsi/ginkgo/ginkgo/watch/package_hash.go | 104 +
.../ginkgo/ginkgo/watch/package_hashes.go | 85 +
.../onsi/ginkgo/ginkgo/watch/suite.go | 87 +
.../onsi/ginkgo/ginkgo/watch_command.go | 175 +
.../internal/codelocation/code_location.go | 48 +
.../internal/containernode/container_node.go | 151 +
.../onsi/ginkgo/internal/failer/failer.go | 92 +
.../ginkgo/internal/leafnodes/benchmarker.go | 103 +
.../ginkgo/internal/leafnodes/interfaces.go | 19 +
.../onsi/ginkgo/internal/leafnodes/it_node.go | 47 +
.../ginkgo/internal/leafnodes/measure_node.go | 62 +
.../onsi/ginkgo/internal/leafnodes/runner.go | 117 +
.../ginkgo/internal/leafnodes/setup_nodes.go | 48 +
.../ginkgo/internal/leafnodes/suite_nodes.go | 55 +
.../synchronized_after_suite_node.go | 90 +
.../synchronized_before_suite_node.go | 181 +
.../onsi/ginkgo/internal/remote/aggregator.go | 249 +
.../internal/remote/forwarding_reporter.go | 147 +
.../internal/remote/output_interceptor.go | 13 +
.../remote/output_interceptor_darwin.go | 11 +
.../remote/output_interceptor_dragonfly.go | 11 +
.../remote/output_interceptor_freebsd.go | 11 +
.../remote/output_interceptor_linux.go | 12 +
.../output_interceptor_linux_mips64le.go | 12 +
.../remote/output_interceptor_netbsd.go | 11 +
.../remote/output_interceptor_openbsd.go | 11 +
.../remote/output_interceptor_solaris.go | 11 +
.../remote/output_interceptor_unix.go | 79 +
.../internal/remote/output_interceptor_win.go | 36 +
.../onsi/ginkgo/internal/remote/server.go | 224 +
.../onsi/ginkgo/internal/spec/spec.go | 247 +
.../onsi/ginkgo/internal/spec/specs.go | 144 +
.../internal/spec_iterator/index_computer.go | 55 +
.../spec_iterator/parallel_spec_iterator.go | 59 +
.../spec_iterator/serial_spec_iterator.go | 45 +
.../sharded_parallel_spec_iterator.go | 47 +
.../internal/spec_iterator/spec_iterator.go | 20 +
.../ginkgo/internal/writer/fake_writer.go | 36 +
.../onsi/ginkgo/internal/writer/writer.go | 89 +
.../onsi/ginkgo/reporters/default_reporter.go | 87 +
.../onsi/ginkgo/reporters/fake_reporter.go | 59 +
.../onsi/ginkgo/reporters/junit_reporter.go | 185 +
.../onsi/ginkgo/reporters/reporter.go | 15 +
.../reporters/stenographer/console_logging.go | 64 +
.../stenographer/fake_stenographer.go | 142 +
.../reporters/stenographer/stenographer.go | 572 +
.../stenographer/support/go-colorable/LICENSE | 21 +
.../support/go-colorable/README.md | 43 +
.../support/go-colorable/colorable_others.go | 24 +
.../support/go-colorable/colorable_windows.go | 783 +
.../support/go-colorable/noncolorable.go | 57 +
.../stenographer/support/go-isatty/LICENSE | 9 +
.../stenographer/support/go-isatty/README.md | 37 +
.../stenographer/support/go-isatty/doc.go | 2 +
.../support/go-isatty/isatty_appengine.go | 9 +
.../support/go-isatty/isatty_bsd.go | 18 +
.../support/go-isatty/isatty_linux.go | 18 +
.../support/go-isatty/isatty_solaris.go | 16 +
.../support/go-isatty/isatty_windows.go | 19 +
.../ginkgo/reporters/teamcity_reporter.go | 106 +
.../onsi/ginkgo/types/code_location.go | 15 +
.../onsi/ginkgo/types/synchronization.go | 30 +
vendor/github.com/onsi/ginkgo/types/types.go | 174 +
vendor/github.com/pkg/errors/.gitignore | 24 +
vendor/github.com/pkg/errors/.travis.yml | 10 +
vendor/github.com/pkg/errors/LICENSE | 23 +
vendor/github.com/pkg/errors/Makefile | 44 +
vendor/github.com/pkg/errors/README.md | 59 +
vendor/github.com/pkg/errors/appveyor.yml | 32 +
vendor/github.com/pkg/errors/errors.go | 288 +
vendor/github.com/pkg/errors/go113.go | 38 +
vendor/github.com/pkg/errors/stack.go | 177 +
.../prometheus/client_golang/LICENSE | 201 +
.../prometheus/client_golang/NOTICE | 23 +
.../client_golang/prometheus/.gitignore | 1 +
.../client_golang/prometheus/README.md | 1 +
.../client_golang/prometheus/build_info.go | 29 +
.../prometheus/build_info_pre_1.12.go | 22 +
.../client_golang/prometheus/collector.go | 120 +
.../client_golang/prometheus/counter.go | 321 +
.../client_golang/prometheus/desc.go | 186 +
.../client_golang/prometheus/doc.go | 199 +
.../prometheus/expvar_collector.go | 119 +
.../client_golang/prometheus/fnv.go | 42 +
.../client_golang/prometheus/gauge.go | 289 +
.../client_golang/prometheus/go_collector.go | 396 +
.../client_golang/prometheus/histogram.go | 637 +
.../prometheus/internal/metric.go | 85 +
.../client_golang/prometheus/labels.go | 87 +
.../client_golang/prometheus/metric.go | 176 +
.../client_golang/prometheus/observer.go | 64 +
.../prometheus/process_collector.go | 151 +
.../prometheus/process_collector_other.go | 65 +
.../prometheus/process_collector_windows.go | 116 +
.../prometheus/promhttp/delegator.go | 370 +
.../client_golang/prometheus/promhttp/http.go | 379 +
.../prometheus/promhttp/instrument_client.go | 219 +
.../prometheus/promhttp/instrument_server.go | 447 +
.../client_golang/prometheus/registry.go | 948 +
.../client_golang/prometheus/summary.go | 737 +
.../client_golang/prometheus/timer.go | 54 +
.../client_golang/prometheus/untyped.go | 42 +
.../client_golang/prometheus/value.go | 205 +
.../client_golang/prometheus/vec.go | 484 +
.../client_golang/prometheus/wrap.go | 212 +
.../prometheus/client_model/LICENSE | 201 +
.../github.com/prometheus/client_model/NOTICE | 5 +
.../prometheus/client_model/go/metrics.pb.go | 723 +
vendor/github.com/prometheus/common/LICENSE | 201 +
vendor/github.com/prometheus/common/NOTICE | 5 +
.../prometheus/common/expfmt/decode.go | 429 +
.../prometheus/common/expfmt/encode.go | 162 +
.../prometheus/common/expfmt/expfmt.go | 41 +
.../prometheus/common/expfmt/fuzz.go | 36 +
.../common/expfmt/openmetrics_create.go | 527 +
.../prometheus/common/expfmt/text_create.go | 465 +
.../prometheus/common/expfmt/text_parse.go | 764 +
.../bitbucket.org/ww/goautoneg/README.txt | 67 +
.../bitbucket.org/ww/goautoneg/autoneg.go | 162 +
.../prometheus/common/model/alert.go | 136 +
.../prometheus/common/model/fingerprinting.go | 105 +
.../github.com/prometheus/common/model/fnv.go | 42 +
.../prometheus/common/model/labels.go | 210 +
.../prometheus/common/model/labelset.go | 169 +
.../prometheus/common/model/metric.go | 102 +
.../prometheus/common/model/model.go | 16 +
.../prometheus/common/model/signature.go | 144 +
.../prometheus/common/model/silence.go | 106 +
.../prometheus/common/model/time.go | 274 +
.../prometheus/common/model/value.go | 416 +
.../github.com/prometheus/procfs/.gitignore | 1 +
.../prometheus/procfs/.golangci.yml | 4 +
.../prometheus/procfs/CONTRIBUTING.md | 121 +
vendor/github.com/prometheus/procfs/LICENSE | 201 +
.../prometheus/procfs/MAINTAINERS.md | 2 +
vendor/github.com/prometheus/procfs/Makefile | 29 +
.../prometheus/procfs/Makefile.common | 300 +
vendor/github.com/prometheus/procfs/NOTICE | 7 +
vendor/github.com/prometheus/procfs/README.md | 61 +
vendor/github.com/prometheus/procfs/arp.go | 85 +
.../github.com/prometheus/procfs/buddyinfo.go | 85 +
.../github.com/prometheus/procfs/cpuinfo.go | 420 +
.../prometheus/procfs/cpuinfo_arm.go | 18 +
.../prometheus/procfs/cpuinfo_arm64.go | 19 +
.../prometheus/procfs/cpuinfo_default.go | 19 +
.../prometheus/procfs/cpuinfo_mips.go | 18 +
.../prometheus/procfs/cpuinfo_mips64.go | 18 +
.../prometheus/procfs/cpuinfo_mips64le.go | 18 +
.../prometheus/procfs/cpuinfo_mipsle.go | 18 +
.../prometheus/procfs/cpuinfo_ppc64.go | 18 +
.../prometheus/procfs/cpuinfo_ppc64le.go | 18 +
.../prometheus/procfs/cpuinfo_s390x.go | 18 +
vendor/github.com/prometheus/procfs/crypto.go | 153 +
vendor/github.com/prometheus/procfs/doc.go | 45 +
.../prometheus/procfs/fixtures.ttar | 6114 ++
vendor/github.com/prometheus/procfs/fs.go | 43 +
.../github.com/prometheus/procfs/fscache.go | 422 +
vendor/github.com/prometheus/procfs/go.mod | 9 +
vendor/github.com/prometheus/procfs/go.sum | 6 +
.../prometheus/procfs/internal/fs/fs.go | 55 +
.../prometheus/procfs/internal/util/parse.go | 97 +
.../procfs/internal/util/readfile.go | 38 +
.../procfs/internal/util/sysreadfile.go | 48 +
.../internal/util/sysreadfile_compat.go | 26 +
.../procfs/internal/util/valueparser.go | 91 +
vendor/github.com/prometheus/procfs/ipvs.go | 241 +
.../prometheus/procfs/kernel_random.go | 62 +
.../github.com/prometheus/procfs/loadavg.go | 62 +
vendor/github.com/prometheus/procfs/mdstat.go | 194 +
.../github.com/prometheus/procfs/meminfo.go | 277 +
.../github.com/prometheus/procfs/mountinfo.go | 180 +
.../prometheus/procfs/mountstats.go | 629 +
.../prometheus/procfs/net_conntrackstat.go | 153 +
.../github.com/prometheus/procfs/net_dev.go | 205 +
.../prometheus/procfs/net_sockstat.go | 163 +
.../prometheus/procfs/net_softnet.go | 102 +
.../github.com/prometheus/procfs/net_udp.go | 229 +
.../github.com/prometheus/procfs/net_unix.go | 257 +
vendor/github.com/prometheus/procfs/proc.go | 319 +
.../prometheus/procfs/proc_cgroup.go | 98 +
.../prometheus/procfs/proc_environ.go | 37 +
.../prometheus/procfs/proc_fdinfo.go | 133 +
.../github.com/prometheus/procfs/proc_io.go | 59 +
.../prometheus/procfs/proc_limits.go | 157 +
.../github.com/prometheus/procfs/proc_maps.go | 209 +
.../github.com/prometheus/procfs/proc_ns.go | 68 +
.../github.com/prometheus/procfs/proc_psi.go | 100 +
.../prometheus/procfs/proc_smaps.go | 165 +
.../github.com/prometheus/procfs/proc_stat.go | 192 +
.../prometheus/procfs/proc_status.go | 166 +
.../github.com/prometheus/procfs/schedstat.go | 118 +
vendor/github.com/prometheus/procfs/stat.go | 244 +
vendor/github.com/prometheus/procfs/swaps.go | 89 +
vendor/github.com/prometheus/procfs/ttar | 413 +
vendor/github.com/prometheus/procfs/vm.go | 210 +
vendor/github.com/prometheus/procfs/xfrm.go | 187 +
.../github.com/prometheus/procfs/zoneinfo.go | 196 +
.../github.com/rancher/fleet/pkg/apis/LICENSE | 178 +
.../apis/fleet.cattle.io/v1alpha1/bundle.go | 265 +
.../pkg/apis/fleet.cattle.io/v1alpha1/doc.go | 21 +
.../pkg/apis/fleet.cattle.io/v1alpha1/git.go | 78 +
.../apis/fleet.cattle.io/v1alpha1/target.go | 136 +
.../apis/fleet.cattle.io/v1alpha1/values.go | 26 +
.../v1alpha1/zz_generated_deepcopy.go | 1429 +
.../v1alpha1/zz_generated_list_types.go | 195 +
.../v1alpha1/zz_generated_register.go | 87 +
.../fleet.cattle.io/zz_generated_register.go | 24 +
vendor/github.com/rancher/wrangler/LICENSE | 178 +
.../wrangler/pkg/data/convert/convert.go | 302 +
.../rancher/wrangler/pkg/data/data.go | 59 +
.../rancher/wrangler/pkg/data/merge.go | 24 +
.../rancher/wrangler/pkg/data/values.go | 58 +
.../pkg/genericcondition/condition.go | 18 +
.../rancher/wrangler/pkg/kv/split.go | 45 +
.../wrangler/pkg/summary/cattletypes.go | 27 +
.../rancher/wrangler/pkg/summary/condition.go | 45 +
.../rancher/wrangler/pkg/summary/coretypes.go | 153 +
.../wrangler/pkg/summary/summarized.go | 98 +
.../wrangler/pkg/summary/summarizers.go | 425 +
.../rancher/wrangler/pkg/summary/summary.go | 123 +
.../github.com/rogpeppe/go-internal/LICENSE | 27 +
.../rogpeppe/go-internal/modfile/gopkgin.go | 47 +
.../rogpeppe/go-internal/modfile/print.go | 164 +
.../rogpeppe/go-internal/modfile/read.go | 869 +
.../rogpeppe/go-internal/modfile/rule.go | 724 +
.../rogpeppe/go-internal/module/module.go | 540 +
.../rogpeppe/go-internal/semver/semver.go | 388 +
.../russross/blackfriday/v2/.gitignore | 8 +
.../russross/blackfriday/v2/.travis.yml | 17 +
.../russross/blackfriday/v2/LICENSE.txt | 29 +
.../russross/blackfriday/v2/README.md | 291 +
.../russross/blackfriday/v2/block.go | 1590 +
.../github.com/russross/blackfriday/v2/doc.go | 18 +
.../github.com/russross/blackfriday/v2/esc.go | 34 +
.../github.com/russross/blackfriday/v2/go.mod | 1 +
.../russross/blackfriday/v2/html.go | 949 +
.../russross/blackfriday/v2/inline.go | 1228 +
.../russross/blackfriday/v2/markdown.go | 950 +
.../russross/blackfriday/v2/node.go | 354 +
.../russross/blackfriday/v2/smartypants.go | 457 +
.../sanitized_anchor_name/.travis.yml | 16 +
.../shurcooL/sanitized_anchor_name/LICENSE | 21 +
.../shurcooL/sanitized_anchor_name/README.md | 36 +
.../shurcooL/sanitized_anchor_name/go.mod | 1 +
.../shurcooL/sanitized_anchor_name/main.go | 29 +
vendor/github.com/sirupsen/logrus/.gitignore | 4 +
.../github.com/sirupsen/logrus/.golangci.yml | 40 +
vendor/github.com/sirupsen/logrus/.travis.yml | 17 +
.../github.com/sirupsen/logrus/CHANGELOG.md | 223 +
vendor/github.com/sirupsen/logrus/LICENSE | 21 +
vendor/github.com/sirupsen/logrus/README.md | 513 +
vendor/github.com/sirupsen/logrus/alt_exit.go | 76 +
.../github.com/sirupsen/logrus/appveyor.yml | 14 +
.../github.com/sirupsen/logrus/buffer_pool.go | 52 +
vendor/github.com/sirupsen/logrus/doc.go | 26 +
vendor/github.com/sirupsen/logrus/entry.go | 422 +
vendor/github.com/sirupsen/logrus/exported.go | 270 +
.../github.com/sirupsen/logrus/formatter.go | 78 +
vendor/github.com/sirupsen/logrus/go.mod | 10 +
vendor/github.com/sirupsen/logrus/go.sum | 10 +
vendor/github.com/sirupsen/logrus/hooks.go | 34 +
.../sirupsen/logrus/json_formatter.go | 125 +
vendor/github.com/sirupsen/logrus/logger.go | 404 +
vendor/github.com/sirupsen/logrus/logrus.go | 186 +
.../logrus/terminal_check_appengine.go | 11 +
.../sirupsen/logrus/terminal_check_bsd.go | 13 +
.../sirupsen/logrus/terminal_check_js.go | 7 +
.../logrus/terminal_check_no_terminal.go | 11 +
.../logrus/terminal_check_notappengine.go | 17 +
.../sirupsen/logrus/terminal_check_solaris.go | 11 +
.../sirupsen/logrus/terminal_check_unix.go | 13 +
.../sirupsen/logrus/terminal_check_windows.go | 27 +
.../sirupsen/logrus/text_formatter.go | 334 +
vendor/github.com/sirupsen/logrus/writer.go | 70 +
vendor/github.com/spf13/cobra/.gitignore | 39 +
vendor/github.com/spf13/cobra/.mailmap | 3 +
vendor/github.com/spf13/cobra/.travis.yml | 29 +
vendor/github.com/spf13/cobra/CHANGELOG.md | 22 +
vendor/github.com/spf13/cobra/CONTRIBUTING.md | 50 +
vendor/github.com/spf13/cobra/LICENSE.txt | 174 +
vendor/github.com/spf13/cobra/Makefile | 36 +
vendor/github.com/spf13/cobra/README.md | 766 +
vendor/github.com/spf13/cobra/args.go | 109 +
.../spf13/cobra/bash_completions.go | 678 +
.../spf13/cobra/bash_completions.md | 91 +
vendor/github.com/spf13/cobra/cobra.go | 207 +
vendor/github.com/spf13/cobra/command.go | 1664 +
.../github.com/spf13/cobra/command_notwin.go | 5 +
vendor/github.com/spf13/cobra/command_win.go | 26 +
.../spf13/cobra/custom_completions.go | 557 +
.../spf13/cobra/fish_completions.go | 207 +
.../spf13/cobra/fish_completions.md | 4 +
vendor/github.com/spf13/cobra/go.mod | 12 +
vendor/github.com/spf13/cobra/go.sum | 313 +
.../spf13/cobra/powershell_completions.go | 100 +
.../spf13/cobra/powershell_completions.md | 16 +
.../spf13/cobra/projects_using_cobra.md | 35 +
.../spf13/cobra/shell_completions.go | 84 +
.../spf13/cobra/shell_completions.md | 434 +
.../github.com/spf13/cobra/zsh_completions.go | 240 +
.../github.com/spf13/cobra/zsh_completions.md | 48 +
vendor/github.com/spf13/pflag/.gitignore | 2 +
vendor/github.com/spf13/pflag/.travis.yml | 22 +
vendor/github.com/spf13/pflag/LICENSE | 28 +
vendor/github.com/spf13/pflag/README.md | 296 +
vendor/github.com/spf13/pflag/bool.go | 94 +
vendor/github.com/spf13/pflag/bool_slice.go | 185 +
vendor/github.com/spf13/pflag/bytes.go | 209 +
vendor/github.com/spf13/pflag/count.go | 96 +
vendor/github.com/spf13/pflag/duration.go | 86 +
.../github.com/spf13/pflag/duration_slice.go | 166 +
vendor/github.com/spf13/pflag/flag.go | 1239 +
vendor/github.com/spf13/pflag/float32.go | 88 +
.../github.com/spf13/pflag/float32_slice.go | 174 +
vendor/github.com/spf13/pflag/float64.go | 84 +
.../github.com/spf13/pflag/float64_slice.go | 166 +
vendor/github.com/spf13/pflag/go.mod | 3 +
vendor/github.com/spf13/pflag/go.sum | 0
vendor/github.com/spf13/pflag/golangflag.go | 105 +
vendor/github.com/spf13/pflag/int.go | 84 +
vendor/github.com/spf13/pflag/int16.go | 88 +
vendor/github.com/spf13/pflag/int32.go | 88 +
vendor/github.com/spf13/pflag/int32_slice.go | 174 +
vendor/github.com/spf13/pflag/int64.go | 84 +
vendor/github.com/spf13/pflag/int64_slice.go | 166 +
vendor/github.com/spf13/pflag/int8.go | 88 +
vendor/github.com/spf13/pflag/int_slice.go | 158 +
vendor/github.com/spf13/pflag/ip.go | 94 +
vendor/github.com/spf13/pflag/ip_slice.go | 186 +
vendor/github.com/spf13/pflag/ipmask.go | 122 +
vendor/github.com/spf13/pflag/ipnet.go | 98 +
vendor/github.com/spf13/pflag/string.go | 80 +
vendor/github.com/spf13/pflag/string_array.go | 129 +
vendor/github.com/spf13/pflag/string_slice.go | 163 +
.../github.com/spf13/pflag/string_to_int.go | 149 +
.../github.com/spf13/pflag/string_to_int64.go | 149 +
.../spf13/pflag/string_to_string.go | 160 +
vendor/github.com/spf13/pflag/uint.go | 88 +
vendor/github.com/spf13/pflag/uint16.go | 88 +
vendor/github.com/spf13/pflag/uint32.go | 88 +
vendor/github.com/spf13/pflag/uint64.go | 88 +
vendor/github.com/spf13/pflag/uint8.go | 88 +
vendor/github.com/spf13/pflag/uint_slice.go | 168 +
vendor/go.uber.org/atomic/.codecov.yml | 15 +
vendor/go.uber.org/atomic/.gitignore | 12 +
vendor/go.uber.org/atomic/.travis.yml | 27 +
vendor/go.uber.org/atomic/CHANGELOG.md | 64 +
vendor/go.uber.org/atomic/LICENSE.txt | 19 +
vendor/go.uber.org/atomic/Makefile | 35 +
vendor/go.uber.org/atomic/README.md | 63 +
vendor/go.uber.org/atomic/atomic.go | 356 +
vendor/go.uber.org/atomic/error.go | 55 +
vendor/go.uber.org/atomic/go.mod | 10 +
vendor/go.uber.org/atomic/go.sum | 22 +
vendor/go.uber.org/atomic/string.go | 49 +
vendor/go.uber.org/multierr/.codecov.yml | 15 +
vendor/go.uber.org/multierr/.gitignore | 4 +
vendor/go.uber.org/multierr/.travis.yml | 29 +
vendor/go.uber.org/multierr/CHANGELOG.md | 54 +
vendor/go.uber.org/multierr/LICENSE.txt | 19 +
vendor/go.uber.org/multierr/Makefile | 42 +
vendor/go.uber.org/multierr/README.md | 23 +
vendor/go.uber.org/multierr/error.go | 449 +
vendor/go.uber.org/multierr/glide.yaml | 8 +
vendor/go.uber.org/multierr/go.mod | 12 +
vendor/go.uber.org/multierr/go.sum | 45 +
vendor/go.uber.org/multierr/go113.go | 52 +
vendor/go.uber.org/zap/.codecov.yml | 17 +
vendor/go.uber.org/zap/.gitignore | 32 +
vendor/go.uber.org/zap/.readme.tmpl | 109 +
vendor/go.uber.org/zap/.travis.yml | 23 +
vendor/go.uber.org/zap/CHANGELOG.md | 401 +
vendor/go.uber.org/zap/CODE_OF_CONDUCT.md | 75 +
vendor/go.uber.org/zap/CONTRIBUTING.md | 81 +
vendor/go.uber.org/zap/FAQ.md | 155 +
vendor/go.uber.org/zap/LICENSE.txt | 19 +
vendor/go.uber.org/zap/Makefile | 63 +
vendor/go.uber.org/zap/README.md | 134 +
vendor/go.uber.org/zap/array.go | 320 +
vendor/go.uber.org/zap/buffer/buffer.go | 123 +
vendor/go.uber.org/zap/buffer/pool.go | 49 +
vendor/go.uber.org/zap/checklicense.sh | 17 +
vendor/go.uber.org/zap/config.go | 262 +
vendor/go.uber.org/zap/doc.go | 113 +
vendor/go.uber.org/zap/encoder.go | 79 +
vendor/go.uber.org/zap/error.go | 80 +
vendor/go.uber.org/zap/field.go | 533 +
vendor/go.uber.org/zap/flag.go | 39 +
vendor/go.uber.org/zap/glide.yaml | 34 +
vendor/go.uber.org/zap/global.go | 168 +
vendor/go.uber.org/zap/global_go112.go | 26 +
vendor/go.uber.org/zap/global_prego112.go | 26 +
vendor/go.uber.org/zap/go.mod | 12 +
vendor/go.uber.org/zap/go.sum | 56 +
vendor/go.uber.org/zap/http_handler.go | 81 +
.../zap/internal/bufferpool/bufferpool.go | 31 +
.../go.uber.org/zap/internal/color/color.go | 44 +
vendor/go.uber.org/zap/internal/exit/exit.go | 64 +
vendor/go.uber.org/zap/level.go | 132 +
vendor/go.uber.org/zap/logger.go | 311 +
vendor/go.uber.org/zap/options.go | 133 +
vendor/go.uber.org/zap/sink.go | 161 +
vendor/go.uber.org/zap/stacktrace.go | 126 +
vendor/go.uber.org/zap/sugar.go | 304 +
vendor/go.uber.org/zap/time.go | 27 +
vendor/go.uber.org/zap/writer.go | 99 +
.../zap/zapcore/console_encoder.go | 147 +
vendor/go.uber.org/zap/zapcore/core.go | 113 +
vendor/go.uber.org/zap/zapcore/doc.go | 24 +
vendor/go.uber.org/zap/zapcore/encoder.go | 401 +
vendor/go.uber.org/zap/zapcore/entry.go | 258 +
vendor/go.uber.org/zap/zapcore/error.go | 115 +
vendor/go.uber.org/zap/zapcore/field.go | 217 +
vendor/go.uber.org/zap/zapcore/hook.go | 68 +
.../go.uber.org/zap/zapcore/increase_level.go | 66 +
.../go.uber.org/zap/zapcore/json_encoder.go | 524 +
vendor/go.uber.org/zap/zapcore/level.go | 175 +
.../go.uber.org/zap/zapcore/level_strings.go | 46 +
vendor/go.uber.org/zap/zapcore/marshaler.go | 53 +
.../go.uber.org/zap/zapcore/memory_encoder.go | 179 +
vendor/go.uber.org/zap/zapcore/sampler.go | 208 +
vendor/go.uber.org/zap/zapcore/tee.go | 81 +
.../go.uber.org/zap/zapcore/write_syncer.go | 123 +
vendor/golang.org/x/crypto/AUTHORS | 3 +
vendor/golang.org/x/crypto/CONTRIBUTORS | 3 +
vendor/golang.org/x/crypto/LICENSE | 27 +
vendor/golang.org/x/crypto/PATENTS | 22 +
vendor/golang.org/x/crypto/blowfish/block.go | 159 +
vendor/golang.org/x/crypto/blowfish/cipher.go | 99 +
vendor/golang.org/x/crypto/blowfish/const.go | 199 +
.../x/crypto/chacha20/chacha_arm64.go | 16 +
.../x/crypto/chacha20/chacha_arm64.s | 307 +
.../x/crypto/chacha20/chacha_generic.go | 398 +
.../x/crypto/chacha20/chacha_noasm.go | 13 +
.../x/crypto/chacha20/chacha_ppc64le.go | 16 +
.../x/crypto/chacha20/chacha_ppc64le.s | 449 +
.../x/crypto/chacha20/chacha_s390x.go | 26 +
.../x/crypto/chacha20/chacha_s390x.s | 224 +
vendor/golang.org/x/crypto/chacha20/xor.go | 42 +
.../x/crypto/curve25519/curve25519.go | 95 +
.../x/crypto/curve25519/curve25519_amd64.go | 240 +
.../x/crypto/curve25519/curve25519_amd64.s | 1793 +
.../x/crypto/curve25519/curve25519_generic.go | 828 +
.../x/crypto/curve25519/curve25519_noasm.go | 11 +
vendor/golang.org/x/crypto/ed25519/ed25519.go | 222 +
.../x/crypto/ed25519/ed25519_go113.go | 73 +
.../ed25519/internal/edwards25519/const.go | 1422 +
.../internal/edwards25519/edwards25519.go | 1793 +
.../x/crypto/internal/subtle/aliasing.go | 32 +
.../crypto/internal/subtle/aliasing_purego.go | 35 +
vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 +
.../x/crypto/poly1305/bits_compat.go | 39 +
.../x/crypto/poly1305/bits_go1.13.go | 21 +
.../golang.org/x/crypto/poly1305/mac_noasm.go | 9 +
.../golang.org/x/crypto/poly1305/poly1305.go | 99 +
.../golang.org/x/crypto/poly1305/sum_amd64.go | 47 +
.../golang.org/x/crypto/poly1305/sum_amd64.s | 108 +
.../x/crypto/poly1305/sum_generic.go | 310 +
.../x/crypto/poly1305/sum_ppc64le.go | 47 +
.../x/crypto/poly1305/sum_ppc64le.s | 181 +
.../golang.org/x/crypto/poly1305/sum_s390x.go | 75 +
.../golang.org/x/crypto/poly1305/sum_s390x.s | 503 +
vendor/golang.org/x/crypto/scrypt/scrypt.go | 213 +
vendor/golang.org/x/crypto/ssh/buffer.go | 97 +
vendor/golang.org/x/crypto/ssh/certs.go | 546 +
vendor/golang.org/x/crypto/ssh/channel.go | 633 +
vendor/golang.org/x/crypto/ssh/cipher.go | 781 +
vendor/golang.org/x/crypto/ssh/client.go | 278 +
vendor/golang.org/x/crypto/ssh/client_auth.go | 641 +
vendor/golang.org/x/crypto/ssh/common.go | 404 +
vendor/golang.org/x/crypto/ssh/connection.go | 143 +
vendor/golang.org/x/crypto/ssh/doc.go | 21 +
vendor/golang.org/x/crypto/ssh/handshake.go | 647 +
.../ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go | 93 +
vendor/golang.org/x/crypto/ssh/kex.go | 782 +
vendor/golang.org/x/crypto/ssh/keys.go | 1474 +
vendor/golang.org/x/crypto/ssh/mac.go | 61 +
vendor/golang.org/x/crypto/ssh/messages.go | 866 +
vendor/golang.org/x/crypto/ssh/mux.go | 351 +
vendor/golang.org/x/crypto/ssh/server.go | 720 +
vendor/golang.org/x/crypto/ssh/session.go | 647 +
vendor/golang.org/x/crypto/ssh/ssh_gss.go | 139 +
vendor/golang.org/x/crypto/ssh/streamlocal.go | 116 +
vendor/golang.org/x/crypto/ssh/tcpip.go | 474 +
.../x/crypto/ssh/terminal/terminal.go | 76 +
vendor/golang.org/x/crypto/ssh/transport.go | 353 +
vendor/golang.org/x/lint/.travis.yml | 19 +
vendor/golang.org/x/lint/CONTRIBUTING.md | 15 +
vendor/golang.org/x/lint/LICENSE | 27 +
vendor/golang.org/x/lint/README.md | 88 +
vendor/golang.org/x/lint/go.mod | 5 +
vendor/golang.org/x/lint/go.sum | 12 +
vendor/golang.org/x/lint/golint/golint.go | 159 +
vendor/golang.org/x/lint/golint/import.go | 309 +
.../golang.org/x/lint/golint/importcomment.go | 13 +
vendor/golang.org/x/lint/lint.go | 1615 +
vendor/golang.org/x/mod/LICENSE | 27 +
vendor/golang.org/x/mod/PATENTS | 22 +
.../x/mod/internal/lazyregexp/lazyre.go | 78 +
vendor/golang.org/x/mod/modfile/print.go | 174 +
vendor/golang.org/x/mod/modfile/read.go | 948 +
vendor/golang.org/x/mod/modfile/rule.go | 860 +
vendor/golang.org/x/mod/module/module.go | 718 +
vendor/golang.org/x/mod/semver/semver.go | 388 +
vendor/golang.org/x/net/AUTHORS | 3 +
vendor/golang.org/x/net/CONTRIBUTORS | 3 +
vendor/golang.org/x/net/LICENSE | 27 +
vendor/golang.org/x/net/PATENTS | 22 +
vendor/golang.org/x/net/context/context.go | 56 +
.../x/net/context/ctxhttp/ctxhttp.go | 71 +
vendor/golang.org/x/net/context/go17.go | 72 +
vendor/golang.org/x/net/context/go19.go | 20 +
vendor/golang.org/x/net/context/pre_go17.go | 300 +
vendor/golang.org/x/net/context/pre_go19.go | 109 +
vendor/golang.org/x/net/http/httpguts/guts.go | 50 +
.../golang.org/x/net/http/httpguts/httplex.go | 346 +
vendor/golang.org/x/net/http2/.gitignore | 2 +
vendor/golang.org/x/net/http2/Dockerfile | 51 +
vendor/golang.org/x/net/http2/Makefile | 3 +
vendor/golang.org/x/net/http2/README | 20 +
vendor/golang.org/x/net/http2/ciphers.go | 641 +
.../x/net/http2/client_conn_pool.go | 278 +
vendor/golang.org/x/net/http2/databuffer.go | 146 +
vendor/golang.org/x/net/http2/errors.go | 133 +
vendor/golang.org/x/net/http2/flow.go | 52 +
vendor/golang.org/x/net/http2/frame.go | 1614 +
vendor/golang.org/x/net/http2/go111.go | 29 +
vendor/golang.org/x/net/http2/gotrack.go | 170 +
vendor/golang.org/x/net/http2/headermap.go | 88 +
vendor/golang.org/x/net/http2/hpack/encode.go | 240 +
vendor/golang.org/x/net/http2/hpack/hpack.go | 504 +
.../golang.org/x/net/http2/hpack/huffman.go | 229 +
vendor/golang.org/x/net/http2/hpack/tables.go | 479 +
vendor/golang.org/x/net/http2/http2.go | 385 +
vendor/golang.org/x/net/http2/not_go111.go | 20 +
vendor/golang.org/x/net/http2/pipe.go | 168 +
vendor/golang.org/x/net/http2/server.go | 2968 +
vendor/golang.org/x/net/http2/transport.go | 2758 +
vendor/golang.org/x/net/http2/write.go | 365 +
vendor/golang.org/x/net/http2/writesched.go | 248 +
.../x/net/http2/writesched_priority.go | 452 +
.../x/net/http2/writesched_random.go | 77 +
vendor/golang.org/x/net/idna/idna10.0.0.go | 734 +
vendor/golang.org/x/net/idna/idna9.0.0.go | 682 +
vendor/golang.org/x/net/idna/punycode.go | 203 +
vendor/golang.org/x/net/idna/tables10.0.0.go | 4559 ++
vendor/golang.org/x/net/idna/tables11.0.0.go | 4653 ++
vendor/golang.org/x/net/idna/tables12.0.0.go | 4733 ++
vendor/golang.org/x/net/idna/tables13.0.0.go | 4839 ++
vendor/golang.org/x/net/idna/tables9.0.0.go | 4486 +
vendor/golang.org/x/net/idna/trie.go | 72 +
vendor/golang.org/x/net/idna/trieval.go | 119 +
vendor/golang.org/x/oauth2/.travis.yml | 13 +
vendor/golang.org/x/oauth2/AUTHORS | 3 +
vendor/golang.org/x/oauth2/CONTRIBUTING.md | 26 +
vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 +
vendor/golang.org/x/oauth2/LICENSE | 27 +
vendor/golang.org/x/oauth2/README.md | 35 +
vendor/golang.org/x/oauth2/go.mod | 10 +
vendor/golang.org/x/oauth2/go.sum | 12 +
.../x/oauth2/internal/client_appengine.go | 13 +
vendor/golang.org/x/oauth2/internal/doc.go | 6 +
vendor/golang.org/x/oauth2/internal/oauth2.go | 37 +
vendor/golang.org/x/oauth2/internal/token.go | 294 +
.../golang.org/x/oauth2/internal/transport.go | 33 +
vendor/golang.org/x/oauth2/oauth2.go | 381 +
vendor/golang.org/x/oauth2/token.go | 178 +
vendor/golang.org/x/oauth2/transport.go | 89 +
vendor/golang.org/x/sync/AUTHORS | 3 +
vendor/golang.org/x/sync/CONTRIBUTORS | 3 +
vendor/golang.org/x/sync/LICENSE | 27 +
vendor/golang.org/x/sync/PATENTS | 22 +
vendor/golang.org/x/sync/errgroup/errgroup.go | 66 +
vendor/golang.org/x/sys/AUTHORS | 3 +
vendor/golang.org/x/sys/CONTRIBUTORS | 3 +
vendor/golang.org/x/sys/LICENSE | 27 +
vendor/golang.org/x/sys/PATENTS | 22 +
vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 17 +
vendor/golang.org/x/sys/cpu/byteorder.go | 65 +
vendor/golang.org/x/sys/cpu/cpu.go | 287 +
vendor/golang.org/x/sys/cpu/cpu_aix.go | 32 +
vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 +
vendor/golang.org/x/sys/cpu/cpu_arm64.go | 172 +
vendor/golang.org/x/sys/cpu/cpu_arm64.s | 31 +
vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 11 +
vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 +
vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 16 +
.../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 11 +
.../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 +
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 43 +
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 26 +
vendor/golang.org/x/sys/cpu/cpu_linux.go | 15 +
vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 +
.../golang.org/x/sys/cpu/cpu_linux_arm64.go | 71 +
.../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 23 +
.../golang.org/x/sys/cpu/cpu_linux_noinit.go | 9 +
.../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 31 +
.../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 +
vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 15 +
vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 +
.../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 +
vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 9 +
.../golang.org/x/sys/cpu/cpu_other_arm64.go | 10 +
.../golang.org/x/sys/cpu/cpu_other_mips64x.go | 12 +
vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 16 +
vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 11 +
vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 +
vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 +
vendor/golang.org/x/sys/cpu/cpu_wasm.go | 17 +
vendor/golang.org/x/sys/cpu/cpu_x86.go | 135 +
vendor/golang.org/x/sys/cpu/cpu_x86.s | 27 +
vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 +
vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 +
vendor/golang.org/x/sys/cpu/hwcap_linux.go | 56 +
.../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 27 +
.../x/sys/cpu/syscall_aix_ppc64_gc.go | 36 +
vendor/golang.org/x/sys/execabs/execabs.go | 102 +
.../sys/internal/unsafeheader/unsafeheader.go | 30 +
vendor/golang.org/x/sys/plan9/asm.s | 8 +
vendor/golang.org/x/sys/plan9/asm_plan9_386.s | 30 +
.../golang.org/x/sys/plan9/asm_plan9_amd64.s | 30 +
vendor/golang.org/x/sys/plan9/asm_plan9_arm.s | 25 +
vendor/golang.org/x/sys/plan9/const_plan9.go | 70 +
vendor/golang.org/x/sys/plan9/dir_plan9.go | 212 +
vendor/golang.org/x/sys/plan9/env_plan9.go | 31 +
vendor/golang.org/x/sys/plan9/errors_plan9.go | 50 +
vendor/golang.org/x/sys/plan9/mkall.sh | 150 +
vendor/golang.org/x/sys/plan9/mkerrors.sh | 246 +
.../golang.org/x/sys/plan9/mksysnum_plan9.sh | 23 +
.../golang.org/x/sys/plan9/pwd_go15_plan9.go | 21 +
vendor/golang.org/x/sys/plan9/pwd_plan9.go | 23 +
vendor/golang.org/x/sys/plan9/race.go | 30 +
vendor/golang.org/x/sys/plan9/race0.go | 25 +
vendor/golang.org/x/sys/plan9/str.go | 22 +
vendor/golang.org/x/sys/plan9/syscall.go | 116 +
.../golang.org/x/sys/plan9/syscall_plan9.go | 349 +
.../x/sys/plan9/zsyscall_plan9_386.go | 284 +
.../x/sys/plan9/zsyscall_plan9_amd64.go | 284 +
.../x/sys/plan9/zsyscall_plan9_arm.go | 284 +
.../golang.org/x/sys/plan9/zsysnum_plan9.go | 49 +
vendor/golang.org/x/sys/unix/.gitignore | 2 +
vendor/golang.org/x/sys/unix/README.md | 184 +
.../golang.org/x/sys/unix/affinity_linux.go | 86 +
vendor/golang.org/x/sys/unix/aliases.go | 14 +
vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 17 +
vendor/golang.org/x/sys/unix/asm_darwin_386.s | 29 +
.../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 +
vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 +
.../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 +
.../x/sys/unix/asm_dragonfly_amd64.s | 29 +
.../golang.org/x/sys/unix/asm_freebsd_386.s | 29 +
.../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 +
.../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 +
.../golang.org/x/sys/unix/asm_freebsd_arm64.s | 29 +
vendor/golang.org/x/sys/unix/asm_linux_386.s | 65 +
.../golang.org/x/sys/unix/asm_linux_amd64.s | 57 +
vendor/golang.org/x/sys/unix/asm_linux_arm.s | 56 +
.../golang.org/x/sys/unix/asm_linux_arm64.s | 52 +
.../golang.org/x/sys/unix/asm_linux_mips64x.s | 56 +
.../golang.org/x/sys/unix/asm_linux_mipsx.s | 54 +
.../golang.org/x/sys/unix/asm_linux_ppc64x.s | 44 +
.../golang.org/x/sys/unix/asm_linux_riscv64.s | 47 +
.../golang.org/x/sys/unix/asm_linux_s390x.s | 56 +
vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 +
.../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 +
vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 29 +
.../golang.org/x/sys/unix/asm_netbsd_arm64.s | 29 +
.../golang.org/x/sys/unix/asm_openbsd_386.s | 29 +
.../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 +
.../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 +
.../golang.org/x/sys/unix/asm_openbsd_arm64.s | 29 +
.../x/sys/unix/asm_openbsd_mips64.s | 29 +
.../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 +
.../golang.org/x/sys/unix/bluetooth_linux.go | 36 +
vendor/golang.org/x/sys/unix/cap_freebsd.go | 195 +
vendor/golang.org/x/sys/unix/constants.go | 13 +
vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 27 +
vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 29 +
vendor/golang.org/x/sys/unix/dev_darwin.go | 24 +
vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 +
vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 +
vendor/golang.org/x/sys/unix/dev_linux.go | 42 +
vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 +
vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 +
vendor/golang.org/x/sys/unix/dirent.go | 102 +
vendor/golang.org/x/sys/unix/endian_big.go | 9 +
vendor/golang.org/x/sys/unix/endian_little.go | 9 +
vendor/golang.org/x/sys/unix/env_unix.go | 31 +
.../x/sys/unix/errors_freebsd_386.go | 233 +
.../x/sys/unix/errors_freebsd_amd64.go | 233 +
.../x/sys/unix/errors_freebsd_arm.go | 226 +
.../x/sys/unix/errors_freebsd_arm64.go | 17 +
vendor/golang.org/x/sys/unix/fcntl.go | 36 +
vendor/golang.org/x/sys/unix/fcntl_darwin.go | 24 +
.../x/sys/unix/fcntl_linux_32bit.go | 13 +
vendor/golang.org/x/sys/unix/fdset.go | 29 +
vendor/golang.org/x/sys/unix/gccgo.go | 60 +
vendor/golang.org/x/sys/unix/gccgo_c.c | 45 +
.../x/sys/unix/gccgo_linux_amd64.go | 20 +
vendor/golang.org/x/sys/unix/ioctl.go | 74 +
vendor/golang.org/x/sys/unix/mkall.sh | 243 +
vendor/golang.org/x/sys/unix/mkerrors.sh | 726 +
vendor/golang.org/x/sys/unix/pagesize_unix.go | 15 +
.../golang.org/x/sys/unix/pledge_openbsd.go | 163 +
vendor/golang.org/x/sys/unix/ptrace_darwin.go | 11 +
vendor/golang.org/x/sys/unix/ptrace_ios.go | 11 +
vendor/golang.org/x/sys/unix/race.go | 30 +
vendor/golang.org/x/sys/unix/race0.go | 25 +
.../x/sys/unix/readdirent_getdents.go | 12 +
.../x/sys/unix/readdirent_getdirentries.go | 19 +
.../x/sys/unix/sockcmsg_dragonfly.go | 16 +
.../golang.org/x/sys/unix/sockcmsg_linux.go | 36 +
vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 92 +
.../x/sys/unix/sockcmsg_unix_other.go | 42 +
vendor/golang.org/x/sys/unix/str.go | 26 +
vendor/golang.org/x/sys/unix/syscall.go | 94 +
vendor/golang.org/x/sys/unix/syscall_aix.go | 552 +
.../golang.org/x/sys/unix/syscall_aix_ppc.go | 54 +
.../x/sys/unix/syscall_aix_ppc64.go | 85 +
vendor/golang.org/x/sys/unix/syscall_bsd.go | 663 +
.../x/sys/unix/syscall_darwin.1_12.go | 31 +
.../x/sys/unix/syscall_darwin.1_13.go | 107 +
.../golang.org/x/sys/unix/syscall_darwin.go | 668 +
.../x/sys/unix/syscall_darwin_386.go | 50 +
.../x/sys/unix/syscall_darwin_amd64.go | 50 +
.../x/sys/unix/syscall_darwin_arm.go | 51 +
.../x/sys/unix/syscall_darwin_arm64.go | 50 +
.../x/sys/unix/syscall_darwin_libSystem.go | 33 +
.../x/sys/unix/syscall_dragonfly.go | 541 +
.../x/sys/unix/syscall_dragonfly_amd64.go | 56 +
.../golang.org/x/sys/unix/syscall_freebsd.go | 863 +
.../x/sys/unix/syscall_freebsd_386.go | 66 +
.../x/sys/unix/syscall_freebsd_amd64.go | 66 +
.../x/sys/unix/syscall_freebsd_arm.go | 62 +
.../x/sys/unix/syscall_freebsd_arm64.go | 62 +
.../golang.org/x/sys/unix/syscall_illumos.go | 77 +
vendor/golang.org/x/sys/unix/syscall_linux.go | 2401 +
.../x/sys/unix/syscall_linux_386.go | 387 +
.../x/sys/unix/syscall_linux_amd64.go | 194 +
.../x/sys/unix/syscall_linux_amd64_gc.go | 13 +
.../x/sys/unix/syscall_linux_arm.go | 286 +
.../x/sys/unix/syscall_linux_arm64.go | 245 +
.../golang.org/x/sys/unix/syscall_linux_gc.go | 14 +
.../x/sys/unix/syscall_linux_gc_386.go | 16 +
.../x/sys/unix/syscall_linux_gc_arm.go | 13 +
.../x/sys/unix/syscall_linux_gccgo_386.go | 30 +
.../x/sys/unix/syscall_linux_gccgo_arm.go | 20 +
.../x/sys/unix/syscall_linux_mips64x.go | 230 +
.../x/sys/unix/syscall_linux_mipsx.go | 238 +
.../x/sys/unix/syscall_linux_ppc64x.go | 156 +
.../x/sys/unix/syscall_linux_riscv64.go | 230 +
.../x/sys/unix/syscall_linux_s390x.go | 342 +
.../x/sys/unix/syscall_linux_sparc64.go | 151 +
.../golang.org/x/sys/unix/syscall_netbsd.go | 603 +
.../x/sys/unix/syscall_netbsd_386.go | 37 +
.../x/sys/unix/syscall_netbsd_amd64.go | 37 +
.../x/sys/unix/syscall_netbsd_arm.go | 37 +
.../x/sys/unix/syscall_netbsd_arm64.go | 37 +
.../golang.org/x/sys/unix/syscall_openbsd.go | 390 +
.../x/sys/unix/syscall_openbsd_386.go | 41 +
.../x/sys/unix/syscall_openbsd_amd64.go | 41 +
.../x/sys/unix/syscall_openbsd_arm.go | 41 +
.../x/sys/unix/syscall_openbsd_arm64.go | 41 +
.../x/sys/unix/syscall_openbsd_mips64.go | 35 +
.../golang.org/x/sys/unix/syscall_solaris.go | 740 +
.../x/sys/unix/syscall_solaris_amd64.go | 27 +
vendor/golang.org/x/sys/unix/syscall_unix.go | 430 +
.../golang.org/x/sys/unix/syscall_unix_gc.go | 15 +
.../x/sys/unix/syscall_unix_gc_ppc64x.go | 24 +
vendor/golang.org/x/sys/unix/timestruct.go | 76 +
.../golang.org/x/sys/unix/unveil_openbsd.go | 42 +
vendor/golang.org/x/sys/unix/xattr_bsd.go | 240 +
.../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1384 +
.../x/sys/unix/zerrors_aix_ppc64.go | 1385 +
.../x/sys/unix/zerrors_darwin_386.go | 1788 +
.../x/sys/unix/zerrors_darwin_amd64.go | 1788 +
.../x/sys/unix/zerrors_darwin_arm.go | 1788 +
.../x/sys/unix/zerrors_darwin_arm64.go | 1788 +
.../x/sys/unix/zerrors_dragonfly_amd64.go | 1737 +
.../x/sys/unix/zerrors_freebsd_386.go | 1936 +
.../x/sys/unix/zerrors_freebsd_amd64.go | 1935 +
.../x/sys/unix/zerrors_freebsd_arm.go | 1825 +
.../x/sys/unix/zerrors_freebsd_arm64.go | 1936 +
vendor/golang.org/x/sys/unix/zerrors_linux.go | 2786 +
.../x/sys/unix/zerrors_linux_386.go | 790 +
.../x/sys/unix/zerrors_linux_amd64.go | 790 +
.../x/sys/unix/zerrors_linux_arm.go | 796 +
.../x/sys/unix/zerrors_linux_arm64.go | 786 +
.../x/sys/unix/zerrors_linux_mips.go | 797 +
.../x/sys/unix/zerrors_linux_mips64.go | 797 +
.../x/sys/unix/zerrors_linux_mips64le.go | 797 +
.../x/sys/unix/zerrors_linux_mipsle.go | 797 +
.../x/sys/unix/zerrors_linux_ppc64.go | 853 +
.../x/sys/unix/zerrors_linux_ppc64le.go | 853 +
.../x/sys/unix/zerrors_linux_riscv64.go | 777 +
.../x/sys/unix/zerrors_linux_s390x.go | 850 +
.../x/sys/unix/zerrors_linux_sparc64.go | 847 +
.../x/sys/unix/zerrors_netbsd_386.go | 1779 +
.../x/sys/unix/zerrors_netbsd_amd64.go | 1769 +
.../x/sys/unix/zerrors_netbsd_arm.go | 1758 +
.../x/sys/unix/zerrors_netbsd_arm64.go | 1769 +
.../x/sys/unix/zerrors_openbsd_386.go | 1664 +
.../x/sys/unix/zerrors_openbsd_amd64.go | 1774 +
.../x/sys/unix/zerrors_openbsd_arm.go | 1666 +
.../x/sys/unix/zerrors_openbsd_arm64.go | 1797 +
.../x/sys/unix/zerrors_openbsd_mips64.go | 1862 +
.../x/sys/unix/zerrors_solaris_amd64.go | 1553 +
.../x/sys/unix/zptrace_armnn_linux.go | 41 +
.../x/sys/unix/zptrace_linux_arm64.go | 17 +
.../x/sys/unix/zptrace_mipsnn_linux.go | 50 +
.../x/sys/unix/zptrace_mipsnnle_linux.go | 50 +
.../x/sys/unix/zptrace_x86_linux.go | 80 +
.../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1484 +
.../x/sys/unix/zsyscall_aix_ppc64.go | 1442 +
.../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1192 +
.../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1070 +
.../x/sys/unix/zsyscall_darwin_386.1_13.go | 39 +
.../x/sys/unix/zsyscall_darwin_386.1_13.s | 12 +
.../x/sys/unix/zsyscall_darwin_386.go | 2430 +
.../x/sys/unix/zsyscall_darwin_386.s | 290 +
.../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 39 +
.../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 12 +
.../x/sys/unix/zsyscall_darwin_amd64.go | 2430 +
.../x/sys/unix/zsyscall_darwin_amd64.s | 290 +
.../x/sys/unix/zsyscall_darwin_arm.1_13.go | 39 +
.../x/sys/unix/zsyscall_darwin_arm.1_13.s | 12 +
.../x/sys/unix/zsyscall_darwin_arm.go | 2416 +
.../x/sys/unix/zsyscall_darwin_arm.s | 288 +
.../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 39 +
.../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 12 +
.../x/sys/unix/zsyscall_darwin_arm64.go | 2430 +
.../x/sys/unix/zsyscall_darwin_arm64.s | 290 +
.../x/sys/unix/zsyscall_dragonfly_amd64.go | 1676 +
.../x/sys/unix/zsyscall_freebsd_386.go | 2015 +
.../x/sys/unix/zsyscall_freebsd_amd64.go | 2015 +
.../x/sys/unix/zsyscall_freebsd_arm.go | 2015 +
.../x/sys/unix/zsyscall_freebsd_arm64.go | 2015 +
.../x/sys/unix/zsyscall_illumos_amd64.go | 101 +
.../golang.org/x/sys/unix/zsyscall_linux.go | 1933 +
.../x/sys/unix/zsyscall_linux_386.go | 578 +
.../x/sys/unix/zsyscall_linux_amd64.go | 745 +
.../x/sys/unix/zsyscall_linux_arm.go | 715 +
.../x/sys/unix/zsyscall_linux_arm64.go | 602 +
.../x/sys/unix/zsyscall_linux_mips.go | 758 +
.../x/sys/unix/zsyscall_linux_mips64.go | 729 +
.../x/sys/unix/zsyscall_linux_mips64le.go | 729 +
.../x/sys/unix/zsyscall_linux_mipsle.go | 758 +
.../x/sys/unix/zsyscall_linux_ppc64.go | 807 +
.../x/sys/unix/zsyscall_linux_ppc64le.go | 807 +
.../x/sys/unix/zsyscall_linux_riscv64.go | 582 +
.../x/sys/unix/zsyscall_linux_s390x.go | 577 +
.../x/sys/unix/zsyscall_linux_sparc64.go | 740 +
.../x/sys/unix/zsyscall_netbsd_386.go | 1851 +
.../x/sys/unix/zsyscall_netbsd_amd64.go | 1851 +
.../x/sys/unix/zsyscall_netbsd_arm.go | 1851 +
.../x/sys/unix/zsyscall_netbsd_arm64.go | 1851 +
.../x/sys/unix/zsyscall_openbsd_386.go | 1692 +
.../x/sys/unix/zsyscall_openbsd_amd64.go | 1692 +
.../x/sys/unix/zsyscall_openbsd_arm.go | 1692 +
.../x/sys/unix/zsyscall_openbsd_arm64.go | 1692 +
.../x/sys/unix/zsyscall_openbsd_mips64.go | 1692 +
.../x/sys/unix/zsyscall_solaris_amd64.go | 1967 +
.../x/sys/unix/zsysctl_openbsd_386.go | 273 +
.../x/sys/unix/zsysctl_openbsd_amd64.go | 271 +
.../x/sys/unix/zsysctl_openbsd_arm.go | 273 +
.../x/sys/unix/zsysctl_openbsd_arm64.go | 275 +
.../x/sys/unix/zsysctl_openbsd_mips64.go | 279 +
.../x/sys/unix/zsysnum_darwin_386.go | 437 +
.../x/sys/unix/zsysnum_darwin_amd64.go | 439 +
.../x/sys/unix/zsysnum_darwin_arm.go | 437 +
.../x/sys/unix/zsysnum_darwin_arm64.go | 437 +
.../x/sys/unix/zsysnum_dragonfly_amd64.go | 316 +
.../x/sys/unix/zsysnum_freebsd_386.go | 396 +
.../x/sys/unix/zsysnum_freebsd_amd64.go | 396 +
.../x/sys/unix/zsysnum_freebsd_arm.go | 396 +
.../x/sys/unix/zsysnum_freebsd_arm64.go | 396 +
.../x/sys/unix/zsysnum_linux_386.go | 439 +
.../x/sys/unix/zsysnum_linux_amd64.go | 361 +
.../x/sys/unix/zsysnum_linux_arm.go | 403 +
.../x/sys/unix/zsysnum_linux_arm64.go | 306 +
.../x/sys/unix/zsysnum_linux_mips.go | 424 +
.../x/sys/unix/zsysnum_linux_mips64.go | 354 +
.../x/sys/unix/zsysnum_linux_mips64le.go | 354 +
.../x/sys/unix/zsysnum_linux_mipsle.go | 424 +
.../x/sys/unix/zsysnum_linux_ppc64.go | 403 +
.../x/sys/unix/zsysnum_linux_ppc64le.go | 403 +
.../x/sys/unix/zsysnum_linux_riscv64.go | 305 +
.../x/sys/unix/zsysnum_linux_s390x.go | 368 +
.../x/sys/unix/zsysnum_linux_sparc64.go | 382 +
.../x/sys/unix/zsysnum_netbsd_386.go | 274 +
.../x/sys/unix/zsysnum_netbsd_amd64.go | 274 +
.../x/sys/unix/zsysnum_netbsd_arm.go | 274 +
.../x/sys/unix/zsysnum_netbsd_arm64.go | 274 +
.../x/sys/unix/zsysnum_openbsd_386.go | 218 +
.../x/sys/unix/zsysnum_openbsd_amd64.go | 218 +
.../x/sys/unix/zsysnum_openbsd_arm.go | 218 +
.../x/sys/unix/zsysnum_openbsd_arm64.go | 217 +
.../x/sys/unix/zsysnum_openbsd_mips64.go | 220 +
.../golang.org/x/sys/unix/ztypes_aix_ppc.go | 353 +
.../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 357 +
.../x/sys/unix/ztypes_darwin_386.go | 516 +
.../x/sys/unix/ztypes_darwin_amd64.go | 521 +
.../x/sys/unix/ztypes_darwin_arm.go | 516 +
.../x/sys/unix/ztypes_darwin_arm64.go | 521 +
.../x/sys/unix/ztypes_dragonfly_amd64.go | 470 +
.../x/sys/unix/ztypes_freebsd_386.go | 710 +
.../x/sys/unix/ztypes_freebsd_amd64.go | 713 +
.../x/sys/unix/ztypes_freebsd_arm.go | 694 +
.../x/sys/unix/ztypes_freebsd_arm64.go | 691 +
vendor/golang.org/x/sys/unix/ztypes_linux.go | 3682 +
.../golang.org/x/sys/unix/ztypes_linux_386.go | 619 +
.../x/sys/unix/ztypes_linux_amd64.go | 637 +
.../golang.org/x/sys/unix/ztypes_linux_arm.go | 614 +
.../x/sys/unix/ztypes_linux_arm64.go | 616 +
.../x/sys/unix/ztypes_linux_mips.go | 620 +
.../x/sys/unix/ztypes_linux_mips64.go | 619 +
.../x/sys/unix/ztypes_linux_mips64le.go | 619 +
.../x/sys/unix/ztypes_linux_mipsle.go | 620 +
.../x/sys/unix/ztypes_linux_ppc64.go | 626 +
.../x/sys/unix/ztypes_linux_ppc64le.go | 626 +
.../x/sys/unix/ztypes_linux_riscv64.go | 644 +
.../x/sys/unix/ztypes_linux_s390x.go | 640 +
.../x/sys/unix/ztypes_linux_sparc64.go | 621 +
.../x/sys/unix/ztypes_netbsd_386.go | 499 +
.../x/sys/unix/ztypes_netbsd_amd64.go | 507 +
.../x/sys/unix/ztypes_netbsd_arm.go | 504 +
.../x/sys/unix/ztypes_netbsd_arm64.go | 507 +
.../x/sys/unix/ztypes_openbsd_386.go | 572 +
.../x/sys/unix/ztypes_openbsd_amd64.go | 572 +
.../x/sys/unix/ztypes_openbsd_arm.go | 573 +
.../x/sys/unix/ztypes_openbsd_arm64.go | 566 +
.../x/sys/unix/ztypes_openbsd_mips64.go | 566 +
.../x/sys/unix/ztypes_solaris_amd64.go | 441 +
vendor/golang.org/x/sys/windows/aliases.go | 13 +
.../golang.org/x/sys/windows/dll_windows.go | 416 +
vendor/golang.org/x/sys/windows/empty.s | 8 +
.../golang.org/x/sys/windows/env_windows.go | 54 +
vendor/golang.org/x/sys/windows/eventlog.go | 20 +
.../golang.org/x/sys/windows/exec_windows.go | 97 +
.../x/sys/windows/memory_windows.go | 37 +
vendor/golang.org/x/sys/windows/mkerrors.bash | 63 +
.../x/sys/windows/mkknownfolderids.bash | 27 +
vendor/golang.org/x/sys/windows/mksyscall.go | 9 +
vendor/golang.org/x/sys/windows/race.go | 30 +
vendor/golang.org/x/sys/windows/race0.go | 25 +
.../x/sys/windows/security_windows.go | 1418 +
vendor/golang.org/x/sys/windows/service.go | 237 +
.../x/sys/windows/setupapierrors_windows.go | 100 +
vendor/golang.org/x/sys/windows/str.go | 22 +
vendor/golang.org/x/sys/windows/syscall.go | 112 +
.../x/sys/windows/syscall_windows.go | 1499 +
.../golang.org/x/sys/windows/types_windows.go | 1863 +
.../x/sys/windows/types_windows_386.go | 35 +
.../x/sys/windows/types_windows_amd64.go | 34 +
.../x/sys/windows/types_windows_arm.go | 35 +
.../x/sys/windows/zerrors_windows.go | 6853 ++
.../x/sys/windows/zknownfolderids_windows.go | 149 +
.../x/sys/windows/zsyscall_windows.go | 3262 +
vendor/golang.org/x/term/AUTHORS | 3 +
vendor/golang.org/x/term/CONTRIBUTING.md | 26 +
vendor/golang.org/x/term/CONTRIBUTORS | 3 +
vendor/golang.org/x/term/LICENSE | 27 +
vendor/golang.org/x/term/PATENTS | 22 +
vendor/golang.org/x/term/README.md | 19 +
vendor/golang.org/x/term/go.mod | 5 +
vendor/golang.org/x/term/go.sum | 2 +
vendor/golang.org/x/term/term.go | 58 +
vendor/golang.org/x/term/term_plan9.go | 42 +
vendor/golang.org/x/term/term_solaris.go | 111 +
vendor/golang.org/x/term/term_unix.go | 91 +
vendor/golang.org/x/term/term_unix_aix.go | 10 +
vendor/golang.org/x/term/term_unix_bsd.go | 12 +
vendor/golang.org/x/term/term_unix_linux.go | 10 +
vendor/golang.org/x/term/term_unix_zos.go | 10 +
vendor/golang.org/x/term/term_unsupported.go | 38 +
vendor/golang.org/x/term/term_windows.go | 79 +
vendor/golang.org/x/term/terminal.go | 987 +
vendor/golang.org/x/text/AUTHORS | 3 +
vendor/golang.org/x/text/CONTRIBUTORS | 3 +
vendor/golang.org/x/text/LICENSE | 27 +
vendor/golang.org/x/text/PATENTS | 22 +
.../x/text/secure/bidirule/bidirule.go | 336 +
.../x/text/secure/bidirule/bidirule10.0.0.go | 11 +
.../x/text/secure/bidirule/bidirule9.0.0.go | 14 +
.../golang.org/x/text/transform/transform.go | 709 +
vendor/golang.org/x/text/unicode/bidi/bidi.go | 198 +
.../golang.org/x/text/unicode/bidi/bracket.go | 335 +
vendor/golang.org/x/text/unicode/bidi/core.go | 1058 +
vendor/golang.org/x/text/unicode/bidi/prop.go | 206 +
.../x/text/unicode/bidi/tables10.0.0.go | 1815 +
.../x/text/unicode/bidi/tables11.0.0.go | 1887 +
.../x/text/unicode/bidi/tables12.0.0.go | 1923 +
.../x/text/unicode/bidi/tables9.0.0.go | 1781 +
.../golang.org/x/text/unicode/bidi/trieval.go | 60 +
.../x/text/unicode/norm/composition.go | 512 +
.../x/text/unicode/norm/forminfo.go | 278 +
.../golang.org/x/text/unicode/norm/input.go | 109 +
vendor/golang.org/x/text/unicode/norm/iter.go | 458 +
.../x/text/unicode/norm/normalize.go | 609 +
.../x/text/unicode/norm/readwriter.go | 125 +
.../x/text/unicode/norm/tables10.0.0.go | 7657 ++
.../x/text/unicode/norm/tables11.0.0.go | 7693 ++
.../x/text/unicode/norm/tables12.0.0.go | 7710 ++
.../x/text/unicode/norm/tables9.0.0.go | 7637 ++
.../x/text/unicode/norm/transform.go | 88 +
vendor/golang.org/x/text/unicode/norm/trie.go | 54 +
vendor/golang.org/x/text/width/kind_string.go | 28 +
.../golang.org/x/text/width/tables10.0.0.go | 1318 +
.../golang.org/x/text/width/tables11.0.0.go | 1330 +
.../golang.org/x/text/width/tables12.0.0.go | 1350 +
vendor/golang.org/x/text/width/tables9.0.0.go | 1286 +
vendor/golang.org/x/text/width/transform.go | 239 +
vendor/golang.org/x/text/width/trieval.go | 30 +
vendor/golang.org/x/text/width/width.go | 206 +
vendor/golang.org/x/time/AUTHORS | 3 +
vendor/golang.org/x/time/CONTRIBUTORS | 3 +
vendor/golang.org/x/time/LICENSE | 27 +
vendor/golang.org/x/time/PATENTS | 22 +
vendor/golang.org/x/time/rate/rate.go | 402 +
vendor/golang.org/x/tools/AUTHORS | 3 +
vendor/golang.org/x/tools/CONTRIBUTORS | 3 +
vendor/golang.org/x/tools/LICENSE | 27 +
vendor/golang.org/x/tools/PATENTS | 22 +
.../x/tools/go/ast/astutil/enclosing.go | 627 +
.../x/tools/go/ast/astutil/imports.go | 482 +
.../x/tools/go/ast/astutil/rewrite.go | 477 +
.../golang.org/x/tools/go/ast/astutil/util.go | 18 +
.../x/tools/go/gcexportdata/gcexportdata.go | 109 +
.../x/tools/go/gcexportdata/importer.go | 73 +
.../x/tools/go/internal/gcimporter/bexport.go | 852 +
.../x/tools/go/internal/gcimporter/bimport.go | 1039 +
.../go/internal/gcimporter/exportdata.go | 93 +
.../go/internal/gcimporter/gcimporter.go | 1078 +
.../x/tools/go/internal/gcimporter/iexport.go | 739 +
.../x/tools/go/internal/gcimporter/iimport.go | 630 +
.../go/internal/gcimporter/newInterface10.go | 21 +
.../go/internal/gcimporter/newInterface11.go | 13 +
.../tools/go/internal/packagesdriver/sizes.go | 49 +
vendor/golang.org/x/tools/go/packages/doc.go | 221 +
.../x/tools/go/packages/external.go | 101 +
.../golang.org/x/tools/go/packages/golist.go | 1096 +
.../x/tools/go/packages/golist_overlay.go | 575 +
.../x/tools/go/packages/loadmode_string.go | 57 +
.../x/tools/go/packages/packages.go | 1233 +
.../golang.org/x/tools/go/packages/visit.go | 59 +
vendor/golang.org/x/tools/imports/forward.go | 77 +
.../x/tools/internal/event/core/event.go | 85 +
.../x/tools/internal/event/core/export.go | 70 +
.../x/tools/internal/event/core/fast.go | 77 +
.../golang.org/x/tools/internal/event/doc.go | 7 +
.../x/tools/internal/event/event.go | 127 +
.../x/tools/internal/event/keys/keys.go | 564 +
.../x/tools/internal/event/keys/standard.go | 22 +
.../x/tools/internal/event/label/label.go | 213 +
.../x/tools/internal/fastwalk/fastwalk.go | 196 +
.../fastwalk/fastwalk_dirent_fileno.go | 13 +
.../internal/fastwalk/fastwalk_dirent_ino.go | 14 +
.../fastwalk/fastwalk_dirent_namlen_bsd.go | 13 +
.../fastwalk/fastwalk_dirent_namlen_linux.go | 29 +
.../internal/fastwalk/fastwalk_portable.go | 37 +
.../tools/internal/fastwalk/fastwalk_unix.go | 128 +
.../x/tools/internal/gocommand/invoke.go | 273 +
.../x/tools/internal/gocommand/vendor.go | 102 +
.../x/tools/internal/gocommand/version.go | 51 +
.../x/tools/internal/gopathwalk/walk.go | 264 +
.../x/tools/internal/imports/fix.go | 1730 +
.../x/tools/internal/imports/imports.go | 346 +
.../x/tools/internal/imports/mod.go | 693 +
.../x/tools/internal/imports/mod_cache.go | 236 +
.../x/tools/internal/imports/sortimports.go | 280 +
.../x/tools/internal/imports/zstdlib.go | 10516 +++
.../internal/packagesinternal/packages.go | 21 +
.../tools/internal/typesinternal/errorcode.go | 1358 +
.../typesinternal/errorcode_string.go | 152 +
.../x/tools/internal/typesinternal/types.go | 45 +
vendor/golang.org/x/xerrors/LICENSE | 27 +
vendor/golang.org/x/xerrors/PATENTS | 22 +
vendor/golang.org/x/xerrors/README | 2 +
vendor/golang.org/x/xerrors/adaptor.go | 193 +
vendor/golang.org/x/xerrors/codereview.cfg | 1 +
vendor/golang.org/x/xerrors/doc.go | 22 +
vendor/golang.org/x/xerrors/errors.go | 33 +
vendor/golang.org/x/xerrors/fmt.go | 187 +
vendor/golang.org/x/xerrors/format.go | 34 +
vendor/golang.org/x/xerrors/frame.go | 56 +
vendor/golang.org/x/xerrors/go.mod | 3 +
.../golang.org/x/xerrors/internal/internal.go | 8 +
vendor/golang.org/x/xerrors/wrap.go | 106 +
vendor/gomodules.xyz/jsonpatch/v2/LICENSE | 202 +
vendor/gomodules.xyz/jsonpatch/v2/go.mod | 9 +
vendor/gomodules.xyz/jsonpatch/v2/go.sum | 11 +
.../gomodules.xyz/jsonpatch/v2/jsonpatch.go | 336 +
vendor/google.golang.org/appengine/LICENSE | 202 +
.../appengine/internal/api.go | 678 +
.../appengine/internal/api_classic.go | 169 +
.../appengine/internal/api_common.go | 123 +
.../appengine/internal/app_id.go | 28 +
.../appengine/internal/base/api_base.pb.go | 308 +
.../appengine/internal/base/api_base.proto | 33 +
.../internal/datastore/datastore_v3.pb.go | 4367 +
.../internal/datastore/datastore_v3.proto | 551 +
.../appengine/internal/identity.go | 55 +
.../appengine/internal/identity_classic.go | 61 +
.../appengine/internal/identity_flex.go | 11 +
.../appengine/internal/identity_vm.go | 134 +
.../appengine/internal/internal.go | 110 +
.../appengine/internal/log/log_service.pb.go | 1313 +
.../appengine/internal/log/log_service.proto | 150 +
.../appengine/internal/main.go | 16 +
.../appengine/internal/main_common.go | 7 +
.../appengine/internal/main_vm.go | 69 +
.../appengine/internal/metadata.go | 60 +
.../appengine/internal/net.go | 56 +
.../appengine/internal/regen.sh | 40 +
.../internal/remote_api/remote_api.pb.go | 361 +
.../internal/remote_api/remote_api.proto | 44 +
.../appengine/internal/transaction.go | 115 +
.../internal/urlfetch/urlfetch_service.pb.go | 527 +
.../internal/urlfetch/urlfetch_service.proto | 64 +
.../appengine/urlfetch/urlfetch.go | 210 +
vendor/google.golang.org/protobuf/AUTHORS | 3 +
.../google.golang.org/protobuf/CONTRIBUTORS | 3 +
vendor/google.golang.org/protobuf/LICENSE | 27 +
vendor/google.golang.org/protobuf/PATENTS | 22 +
.../protobuf/encoding/prototext/decode.go | 796 +
.../protobuf/encoding/prototext/doc.go | 7 +
.../protobuf/encoding/prototext/encode.go | 433 +
.../protobuf/encoding/protowire/wire.go | 538 +
.../protobuf/internal/descfmt/stringer.go | 316 +
.../protobuf/internal/descopts/options.go | 29 +
.../protobuf/internal/detrand/rand.go | 61 +
.../internal/encoding/defval/default.go | 213 +
.../encoding/messageset/messageset.go | 258 +
.../protobuf/internal/encoding/tag/tag.go | 207 +
.../protobuf/internal/encoding/text/decode.go | 665 +
.../internal/encoding/text/decode_number.go | 190 +
.../internal/encoding/text/decode_string.go | 161 +
.../internal/encoding/text/decode_token.go | 373 +
.../protobuf/internal/encoding/text/doc.go | 29 +
.../protobuf/internal/encoding/text/encode.go | 267 +
.../protobuf/internal/errors/errors.go | 89 +
.../protobuf/internal/errors/is_go112.go | 39 +
.../protobuf/internal/errors/is_go113.go | 12 +
.../protobuf/internal/fieldnum/any_gen.go | 13 +
.../protobuf/internal/fieldnum/api_gen.go | 35 +
.../internal/fieldnum/descriptor_gen.go | 240 +
.../protobuf/internal/fieldnum/doc.go | 7 +
.../internal/fieldnum/duration_gen.go | 13 +
.../protobuf/internal/fieldnum/empty_gen.go | 10 +
.../internal/fieldnum/field_mask_gen.go | 12 +
.../internal/fieldnum/source_context_gen.go | 12 +
.../protobuf/internal/fieldnum/struct_gen.go | 33 +
.../internal/fieldnum/timestamp_gen.go | 13 +
.../protobuf/internal/fieldnum/type_gen.go | 53 +
.../internal/fieldnum/wrappers_gen.go | 52 +
.../protobuf/internal/fieldsort/fieldsort.go | 40 +
.../protobuf/internal/filedesc/build.go | 155 +
.../protobuf/internal/filedesc/desc.go | 613 +
.../protobuf/internal/filedesc/desc_init.go | 471 +
.../protobuf/internal/filedesc/desc_lazy.go | 704 +
.../protobuf/internal/filedesc/desc_list.go | 286 +
.../internal/filedesc/desc_list_gen.go | 345 +
.../protobuf/internal/filedesc/placeholder.go | 107 +
.../protobuf/internal/filetype/build.go | 297 +
.../protobuf/internal/flags/flags.go | 24 +
.../internal/flags/proto_legacy_disable.go | 9 +
.../internal/flags/proto_legacy_enable.go | 9 +
.../protobuf/internal/genname/name.go | 25 +
.../protobuf/internal/impl/api_export.go | 170 +
.../protobuf/internal/impl/checkinit.go | 141 +
.../protobuf/internal/impl/codec_extension.go | 223 +
.../protobuf/internal/impl/codec_field.go | 828 +
.../protobuf/internal/impl/codec_gen.go | 5637 ++
.../protobuf/internal/impl/codec_map.go | 388 +
.../protobuf/internal/impl/codec_map_go111.go | 37 +
.../protobuf/internal/impl/codec_map_go112.go | 11 +
.../protobuf/internal/impl/codec_message.go | 159 +
.../internal/impl/codec_messageset.go | 120 +
.../protobuf/internal/impl/codec_reflect.go | 209 +
.../protobuf/internal/impl/codec_tables.go | 557 +
.../protobuf/internal/impl/codec_unsafe.go | 17 +
.../protobuf/internal/impl/convert.go | 467 +
.../protobuf/internal/impl/convert_list.go | 141 +
.../protobuf/internal/impl/convert_map.go | 121 +
.../protobuf/internal/impl/decode.go | 274 +
.../protobuf/internal/impl/encode.go | 199 +
.../protobuf/internal/impl/enum.go | 21 +
.../protobuf/internal/impl/extension.go | 156 +
.../protobuf/internal/impl/legacy_enum.go | 219 +
.../protobuf/internal/impl/legacy_export.go | 92 +
.../internal/impl/legacy_extension.go | 175 +
.../protobuf/internal/impl/legacy_file.go | 81 +
.../protobuf/internal/impl/legacy_message.go | 502 +
.../protobuf/internal/impl/merge.go | 176 +
.../protobuf/internal/impl/merge_gen.go | 209 +
.../protobuf/internal/impl/message.go | 215 +
.../protobuf/internal/impl/message_reflect.go | 364 +
.../internal/impl/message_reflect_field.go | 466 +
.../internal/impl/message_reflect_gen.go | 249 +
.../protobuf/internal/impl/pointer_reflect.go | 177 +
.../protobuf/internal/impl/pointer_unsafe.go | 173 +
.../protobuf/internal/impl/validate.go | 575 +
.../protobuf/internal/impl/weak.go | 74 +
.../protobuf/internal/mapsort/mapsort.go | 43 +
.../protobuf/internal/pragma/pragma.go | 29 +
.../protobuf/internal/set/ints.go | 58 +
.../protobuf/internal/strs/strings.go | 196 +
.../protobuf/internal/strs/strings_pure.go | 27 +
.../protobuf/internal/strs/strings_unsafe.go | 94 +
.../protobuf/internal/version/version.go | 79 +
.../protobuf/proto/checkinit.go | 71 +
.../protobuf/proto/decode.go | 273 +
.../protobuf/proto/decode_gen.go | 603 +
.../google.golang.org/protobuf/proto/doc.go | 94 +
.../protobuf/proto/encode.go | 346 +
.../protobuf/proto/encode_gen.go | 97 +
.../google.golang.org/protobuf/proto/equal.go | 154 +
.../protobuf/proto/extension.go | 92 +
.../google.golang.org/protobuf/proto/merge.go | 139 +
.../protobuf/proto/messageset.go | 88 +
.../google.golang.org/protobuf/proto/proto.go | 34 +
.../protobuf/proto/proto_methods.go | 19 +
.../protobuf/proto/proto_reflect.go | 19 +
.../google.golang.org/protobuf/proto/reset.go | 43 +
.../google.golang.org/protobuf/proto/size.go | 97 +
.../protobuf/proto/size_gen.go | 55 +
.../protobuf/proto/wrappers.go | 29 +
.../protobuf/reflect/protoreflect/methods.go | 77 +
.../protobuf/reflect/protoreflect/proto.go | 478 +
.../protobuf/reflect/protoreflect/source.go | 52 +
.../protobuf/reflect/protoreflect/type.go | 631 +
.../protobuf/reflect/protoreflect/value.go | 285 +
.../reflect/protoreflect/value_pure.go | 59 +
.../reflect/protoreflect/value_union.go | 411 +
.../reflect/protoreflect/value_unsafe.go | 98 +
.../reflect/protoregistry/registry.go | 800 +
.../protobuf/runtime/protoiface/legacy.go | 15 +
.../protobuf/runtime/protoiface/methods.go | 167 +
.../protobuf/runtime/protoimpl/impl.go | 44 +
.../protobuf/runtime/protoimpl/version.go | 56 +
.../protobuf/types/known/anypb/any.pb.go | 287 +
.../types/known/durationpb/duration.pb.go | 249 +
.../types/known/timestamppb/timestamp.pb.go | 271 +
vendor/gopkg.in/inf.v0/LICENSE | 28 +
vendor/gopkg.in/inf.v0/dec.go | 615 +
vendor/gopkg.in/inf.v0/rounder.go | 145 +
vendor/gopkg.in/tomb.v1/LICENSE | 29 +
vendor/gopkg.in/tomb.v1/README.md | 4 +
vendor/gopkg.in/tomb.v1/tomb.go | 176 +
vendor/gopkg.in/yaml.v2/.travis.yml | 16 +
vendor/gopkg.in/yaml.v2/LICENSE | 201 +
vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 +
vendor/gopkg.in/yaml.v2/NOTICE | 13 +
vendor/gopkg.in/yaml.v2/README.md | 133 +
vendor/gopkg.in/yaml.v2/apic.go | 740 +
vendor/gopkg.in/yaml.v2/decode.go | 815 +
vendor/gopkg.in/yaml.v2/emitterc.go | 1685 +
vendor/gopkg.in/yaml.v2/encode.go | 390 +
vendor/gopkg.in/yaml.v2/go.mod | 5 +
vendor/gopkg.in/yaml.v2/parserc.go | 1095 +
vendor/gopkg.in/yaml.v2/readerc.go | 412 +
vendor/gopkg.in/yaml.v2/resolve.go | 258 +
vendor/gopkg.in/yaml.v2/scannerc.go | 2711 +
vendor/gopkg.in/yaml.v2/sorter.go | 113 +
vendor/gopkg.in/yaml.v2/writerc.go | 26 +
vendor/gopkg.in/yaml.v2/yaml.go | 466 +
vendor/gopkg.in/yaml.v2/yamlh.go | 739 +
vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 +
vendor/gopkg.in/yaml.v3/.travis.yml | 17 +
vendor/gopkg.in/yaml.v3/LICENSE | 50 +
vendor/gopkg.in/yaml.v3/NOTICE | 13 +
vendor/gopkg.in/yaml.v3/README.md | 150 +
vendor/gopkg.in/yaml.v3/apic.go | 747 +
vendor/gopkg.in/yaml.v3/decode.go | 948 +
vendor/gopkg.in/yaml.v3/emitterc.go | 2022 +
vendor/gopkg.in/yaml.v3/encode.go | 572 +
vendor/gopkg.in/yaml.v3/go.mod | 5 +
vendor/gopkg.in/yaml.v3/parserc.go | 1249 +
vendor/gopkg.in/yaml.v3/readerc.go | 434 +
vendor/gopkg.in/yaml.v3/resolve.go | 326 +
vendor/gopkg.in/yaml.v3/scannerc.go | 3028 +
vendor/gopkg.in/yaml.v3/sorter.go | 134 +
vendor/gopkg.in/yaml.v3/writerc.go | 48 +
vendor/gopkg.in/yaml.v3/yaml.go | 693 +
vendor/gopkg.in/yaml.v3/yamlh.go | 807 +
vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 +
vendor/k8s.io/api/LICENSE | 202 +
vendor/k8s.io/api/admission/v1/doc.go | 23 +
.../k8s.io/api/admission/v1/generated.pb.go | 1792 +
.../k8s.io/api/admission/v1/generated.proto | 167 +
vendor/k8s.io/api/admission/v1/register.go | 51 +
vendor/k8s.io/api/admission/v1/types.go | 169 +
.../v1/types_swagger_doc_generated.go | 78 +
.../api/admission/v1/zz_generated.deepcopy.go | 141 +
vendor/k8s.io/api/admission/v1beta1/doc.go | 24 +
.../api/admission/v1beta1/generated.pb.go | 1792 +
.../api/admission/v1beta1/generated.proto | 167 +
.../k8s.io/api/admission/v1beta1/register.go | 51 +
vendor/k8s.io/api/admission/v1beta1/types.go | 174 +
.../v1beta1/types_swagger_doc_generated.go | 78 +
.../v1beta1/zz_generated.deepcopy.go | 141 +
.../zz_generated.prerelease-lifecycle.go | 49 +
.../api/admissionregistration/v1/doc.go | 26 +
.../admissionregistration/v1/generated.pb.go | 3443 +
.../admissionregistration/v1/generated.proto | 479 +
.../api/admissionregistration/v1/register.go | 53 +
.../api/admissionregistration/v1/types.go | 551 +
.../v1/types_swagger_doc_generated.go | 151 +
.../v1/zz_generated.deepcopy.go | 396 +
.../api/admissionregistration/v1beta1/doc.go | 27 +
.../v1beta1/generated.pb.go | 3444 +
.../v1beta1/generated.proto | 487 +
.../admissionregistration/v1beta1/register.go | 53 +
.../admissionregistration/v1beta1/types.go | 575 +
.../v1beta1/types_swagger_doc_generated.go | 151 +
.../v1beta1/zz_generated.deepcopy.go | 396 +
.../zz_generated.prerelease-lifecycle.go | 121 +
vendor/k8s.io/api/apps/v1/doc.go | 21 +
vendor/k8s.io/api/apps/v1/generated.pb.go | 8238 ++
vendor/k8s.io/api/apps/v1/generated.proto | 701 +
vendor/k8s.io/api/apps/v1/register.go | 60 +
vendor/k8s.io/api/apps/v1/types.go | 826 +
.../apps/v1/types_swagger_doc_generated.go | 365 +
.../api/apps/v1/zz_generated.deepcopy.go | 772 +
vendor/k8s.io/api/apps/v1beta1/doc.go | 22 +
.../k8s.io/api/apps/v1beta1/generated.pb.go | 6246 ++
.../k8s.io/api/apps/v1beta1/generated.proto | 484 +
vendor/k8s.io/api/apps/v1beta1/register.go | 58 +
vendor/k8s.io/api/apps/v1beta1/types.go | 599 +
.../v1beta1/types_swagger_doc_generated.go | 273 +
.../api/apps/v1beta1/zz_generated.deepcopy.go | 594 +
.../zz_generated.prerelease-lifecycle.go | 217 +
vendor/k8s.io/api/apps/v1beta2/doc.go | 22 +
.../k8s.io/api/apps/v1beta2/generated.pb.go | 9014 +++
.../k8s.io/api/apps/v1beta2/generated.proto | 752 +
vendor/k8s.io/api/apps/v1beta2/register.go | 61 +
vendor/k8s.io/api/apps/v1beta2/types.go | 920 +
.../v1beta2/types_swagger_doc_generated.go | 396 +
.../api/apps/v1beta2/zz_generated.deepcopy.go | 839 +
.../zz_generated.prerelease-lifecycle.go | 289 +
vendor/k8s.io/api/authentication/v1/doc.go | 22 +
.../api/authentication/v1/generated.pb.go | 2581 +
.../api/authentication/v1/generated.proto | 182 +
.../k8s.io/api/authentication/v1/register.go | 52 +
vendor/k8s.io/api/authentication/v1/types.go | 189 +
.../v1/types_swagger_doc_generated.go | 115 +
.../v1/zz_generated.deepcopy.go | 244 +
.../k8s.io/api/authentication/v1beta1/doc.go | 23 +
.../authentication/v1beta1/generated.pb.go | 1558 +
.../authentication/v1beta1/generated.proto | 118 +
.../api/authentication/v1beta1/register.go | 51 +
.../api/authentication/v1beta1/types.go | 113 +
.../v1beta1/types_swagger_doc_generated.go | 74 +
.../v1beta1/zz_generated.deepcopy.go | 152 +
.../zz_generated.prerelease-lifecycle.go | 49 +
vendor/k8s.io/api/authorization/v1/doc.go | 23 +
.../api/authorization/v1/generated.pb.go | 4087 +
.../api/authorization/v1/generated.proto | 272 +
.../k8s.io/api/authorization/v1/register.go | 55 +
vendor/k8s.io/api/authorization/v1/types.go | 268 +
.../v1/types_swagger_doc_generated.go | 173 +
.../authorization/v1/zz_generated.deepcopy.go | 385 +
.../k8s.io/api/authorization/v1beta1/doc.go | 24 +
.../api/authorization/v1beta1/generated.pb.go | 4087 +
.../api/authorization/v1beta1/generated.proto | 272 +
.../api/authorization/v1beta1/register.go | 55 +
.../k8s.io/api/authorization/v1beta1/types.go | 280 +
.../v1beta1/types_swagger_doc_generated.go | 173 +
.../v1beta1/zz_generated.deepcopy.go | 385 +
.../zz_generated.prerelease-lifecycle.go | 121 +
vendor/k8s.io/api/autoscaling/v1/doc.go | 21 +
.../k8s.io/api/autoscaling/v1/generated.pb.go | 5570 ++
.../k8s.io/api/autoscaling/v1/generated.proto | 419 +
vendor/k8s.io/api/autoscaling/v1/register.go | 53 +
vendor/k8s.io/api/autoscaling/v1/types.go | 432 +
.../v1/types_swagger_doc_generated.go | 250 +
.../autoscaling/v1/zz_generated.deepcopy.go | 515 +
vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 22 +
.../api/autoscaling/v2beta1/generated.pb.go | 5095 ++
.../api/autoscaling/v2beta1/generated.proto | 400 +
.../api/autoscaling/v2beta1/register.go | 52 +
.../k8s.io/api/autoscaling/v2beta1/types.go | 414 +
.../v2beta1/types_swagger_doc_generated.go | 221 +
.../v2beta1/zz_generated.deepcopy.go | 466 +
.../zz_generated.prerelease-lifecycle.go | 73 +
vendor/k8s.io/api/autoscaling/v2beta2/doc.go | 22 +
.../api/autoscaling/v2beta2/generated.pb.go | 6062 ++
.../api/autoscaling/v2beta2/generated.proto | 438 +
.../api/autoscaling/v2beta2/register.go | 50 +
.../k8s.io/api/autoscaling/v2beta2/types.go | 484 +
.../v2beta2/types_swagger_doc_generated.go | 273 +
.../v2beta2/zz_generated.deepcopy.go | 565 +
.../zz_generated.prerelease-lifecycle.go | 57 +
vendor/k8s.io/api/batch/v1/doc.go | 21 +
vendor/k8s.io/api/batch/v1/generated.pb.go | 1854 +
vendor/k8s.io/api/batch/v1/generated.proto | 184 +
vendor/k8s.io/api/batch/v1/register.go | 52 +
vendor/k8s.io/api/batch/v1/types.go | 193 +
.../batch/v1/types_swagger_doc_generated.go | 95 +
.../api/batch/v1/zz_generated.deepcopy.go | 188 +
vendor/k8s.io/api/batch/v1beta1/doc.go | 22 +
.../k8s.io/api/batch/v1beta1/generated.pb.go | 1743 +
.../k8s.io/api/batch/v1beta1/generated.proto | 137 +
vendor/k8s.io/api/batch/v1beta1/register.go | 53 +
vendor/k8s.io/api/batch/v1beta1/types.go | 164 +
.../v1beta1/types_swagger_doc_generated.go | 96 +
.../batch/v1beta1/zz_generated.deepcopy.go | 194 +
.../zz_generated.prerelease-lifecycle.go | 75 +
vendor/k8s.io/api/batch/v2alpha1/doc.go | 21 +
.../k8s.io/api/batch/v2alpha1/generated.pb.go | 1743 +
.../k8s.io/api/batch/v2alpha1/generated.proto | 135 +
vendor/k8s.io/api/batch/v2alpha1/register.go | 53 +
vendor/k8s.io/api/batch/v2alpha1/types.go | 156 +
.../v2alpha1/types_swagger_doc_generated.go | 96 +
.../batch/v2alpha1/zz_generated.deepcopy.go | 194 +
vendor/k8s.io/api/certificates/v1/doc.go | 23 +
.../api/certificates/v1/generated.pb.go | 2042 +
.../api/certificates/v1/generated.proto | 226 +
vendor/k8s.io/api/certificates/v1/register.go | 61 +
vendor/k8s.io/api/certificates/v1/types.go | 284 +
.../v1/types_swagger_doc_generated.go | 88 +
.../certificates/v1/zz_generated.deepcopy.go | 198 +
vendor/k8s.io/api/certificates/v1beta1/doc.go | 24 +
.../api/certificates/v1beta1/generated.pb.go | 2047 +
.../api/certificates/v1beta1/generated.proto | 178 +
.../api/certificates/v1beta1/register.go | 59 +
.../k8s.io/api/certificates/v1beta1/types.go | 239 +
.../v1beta1/types_swagger_doc_generated.go | 77 +
.../v1beta1/zz_generated.deepcopy.go | 203 +
.../zz_generated.prerelease-lifecycle.go | 73 +
vendor/k8s.io/api/coordination/v1/doc.go | 23 +
.../api/coordination/v1/generated.pb.go | 976 +
.../api/coordination/v1/generated.proto | 80 +
vendor/k8s.io/api/coordination/v1/register.go | 53 +
vendor/k8s.io/api/coordination/v1/types.go | 74 +
.../v1/types_swagger_doc_generated.go | 63 +
.../coordination/v1/zz_generated.deepcopy.go | 124 +
vendor/k8s.io/api/coordination/v1beta1/doc.go | 24 +
.../api/coordination/v1beta1/generated.pb.go | 976 +
.../api/coordination/v1beta1/generated.proto | 80 +
.../api/coordination/v1beta1/register.go | 53 +
.../k8s.io/api/coordination/v1beta1/types.go | 80 +
.../v1beta1/types_swagger_doc_generated.go | 63 +
.../v1beta1/zz_generated.deepcopy.go | 124 +
.../zz_generated.prerelease-lifecycle.go | 73 +
.../api/core/v1/annotation_key_constants.go | 131 +
vendor/k8s.io/api/core/v1/doc.go | 22 +
vendor/k8s.io/api/core/v1/generated.pb.go | 67403 ++++++++++++++++
vendor/k8s.io/api/core/v1/generated.proto | 5473 ++
vendor/k8s.io/api/core/v1/lifecycle.go | 37 +
vendor/k8s.io/api/core/v1/objectreference.go | 33 +
vendor/k8s.io/api/core/v1/register.go | 100 +
vendor/k8s.io/api/core/v1/resource.go | 64 +
vendor/k8s.io/api/core/v1/taint.go | 39 +
vendor/k8s.io/api/core/v1/toleration.go | 56 +
vendor/k8s.io/api/core/v1/types.go | 6106 ++
.../core/v1/types_swagger_doc_generated.go | 2506 +
.../k8s.io/api/core/v1/well_known_labels.go | 50 +
.../k8s.io/api/core/v1/well_known_taints.go | 48 +
.../api/core/v1/zz_generated.deepcopy.go | 5885 ++
vendor/k8s.io/api/discovery/v1alpha1/doc.go | 22 +
.../api/discovery/v1alpha1/generated.pb.go | 1704 +
.../api/discovery/v1alpha1/generated.proto | 156 +
.../k8s.io/api/discovery/v1alpha1/register.go | 56 +
vendor/k8s.io/api/discovery/v1alpha1/types.go | 161 +
.../v1alpha1/types_swagger_doc_generated.go | 86 +
.../discovery/v1alpha1/well_known_labels.go | 28 +
.../v1alpha1/zz_generated.deepcopy.go | 195 +
vendor/k8s.io/api/discovery/v1beta1/doc.go | 23 +
.../api/discovery/v1beta1/generated.pb.go | 1704 +
.../api/discovery/v1beta1/generated.proto | 157 +
.../k8s.io/api/discovery/v1beta1/register.go | 56 +
vendor/k8s.io/api/discovery/v1beta1/types.go | 166 +
.../v1beta1/types_swagger_doc_generated.go | 86 +
.../discovery/v1beta1/well_known_labels.go | 32 +
.../v1beta1/zz_generated.deepcopy.go | 195 +
.../zz_generated.prerelease-lifecycle.go | 57 +
vendor/k8s.io/api/events/v1/doc.go | 23 +
vendor/k8s.io/api/events/v1/generated.pb.go | 1406 +
vendor/k8s.io/api/events/v1/generated.proto | 125 +
vendor/k8s.io/api/events/v1/register.go | 53 +
vendor/k8s.io/api/events/v1/types.go | 119 +
.../events/v1/types_swagger_doc_generated.go | 72 +
.../api/events/v1/zz_generated.deepcopy.go | 117 +
vendor/k8s.io/api/events/v1beta1/doc.go | 24 +
.../k8s.io/api/events/v1beta1/generated.pb.go | 1406 +
.../k8s.io/api/events/v1beta1/generated.proto | 123 +
vendor/k8s.io/api/events/v1beta1/register.go | 53 +
vendor/k8s.io/api/events/v1beta1/types.go | 123 +
.../v1beta1/types_swagger_doc_generated.go | 72 +
.../events/v1beta1/zz_generated.deepcopy.go | 117 +
.../zz_generated.prerelease-lifecycle.go | 57 +
vendor/k8s.io/api/extensions/v1beta1/doc.go | 22 +
.../api/extensions/v1beta1/generated.pb.go | 15315 ++++
.../api/extensions/v1beta1/generated.proto | 1234 +
.../k8s.io/api/extensions/v1beta1/register.go | 65 +
vendor/k8s.io/api/extensions/v1beta1/types.go | 1519 +
.../v1beta1/types_swagger_doc_generated.go | 656 +
.../v1beta1/zz_generated.deepcopy.go | 1489 +
.../zz_generated.prerelease-lifecycle.go | 349 +
vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go | 24 +
.../api/flowcontrol/v1alpha1/generated.pb.go | 5433 ++
.../api/flowcontrol/v1alpha1/generated.proto | 434 +
.../api/flowcontrol/v1alpha1/register.go | 58 +
.../k8s.io/api/flowcontrol/v1alpha1/types.go | 519 +
.../v1alpha1/types_swagger_doc_generated.go | 258 +
.../v1alpha1/zz_generated.deepcopy.go | 541 +
vendor/k8s.io/api/networking/v1/doc.go | 22 +
.../k8s.io/api/networking/v1/generated.pb.go | 5560 ++
.../k8s.io/api/networking/v1/generated.proto | 463 +
vendor/k8s.io/api/networking/v1/register.go | 57 +
vendor/k8s.io/api/networking/v1/types.go | 514 +
.../v1/types_swagger_doc_generated.go | 262 +
.../networking/v1/zz_generated.deepcopy.go | 624 +
vendor/k8s.io/api/networking/v1beta1/doc.go | 23 +
.../api/networking/v1beta1/generated.pb.go | 3183 +
.../api/networking/v1beta1/generated.proto | 275 +
.../k8s.io/api/networking/v1beta1/register.go | 58 +
vendor/k8s.io/api/networking/v1beta1/types.go | 331 +
.../v1beta1/types_swagger_doc_generated.go | 160 +
.../v1beta1/well_known_annotations.go | 32 +
.../v1beta1/zz_generated.deepcopy.go | 351 +
.../zz_generated.prerelease-lifecycle.go | 121 +
vendor/k8s.io/api/node/v1alpha1/doc.go | 23 +
.../k8s.io/api/node/v1alpha1/generated.pb.go | 1583 +
.../k8s.io/api/node/v1alpha1/generated.proto | 118 +
vendor/k8s.io/api/node/v1alpha1/register.go | 52 +
vendor/k8s.io/api/node/v1alpha1/types.go | 116 +
.../v1alpha1/types_swagger_doc_generated.go | 80 +
.../node/v1alpha1/zz_generated.deepcopy.go | 165 +
vendor/k8s.io/api/node/v1beta1/doc.go | 24 +
.../k8s.io/api/node/v1beta1/generated.pb.go | 1412 +
.../k8s.io/api/node/v1beta1/generated.proto | 108 +
vendor/k8s.io/api/node/v1beta1/register.go | 52 +
vendor/k8s.io/api/node/v1beta1/types.go | 110 +
.../v1beta1/types_swagger_doc_generated.go | 71 +
.../api/node/v1beta1/zz_generated.deepcopy.go | 148 +
.../zz_generated.prerelease-lifecycle.go | 57 +
vendor/k8s.io/api/policy/v1beta1/doc.go | 25 +
.../k8s.io/api/policy/v1beta1/generated.pb.go | 5561 ++
.../k8s.io/api/policy/v1beta1/generated.proto | 400 +
vendor/k8s.io/api/policy/v1beta1/register.go | 56 +
vendor/k8s.io/api/policy/v1beta1/types.go | 500 +
.../v1beta1/types_swagger_doc_generated.go | 243 +
.../policy/v1beta1/zz_generated.deepcopy.go | 540 +
.../zz_generated.prerelease-lifecycle.go | 111 +
vendor/k8s.io/api/rbac/v1/doc.go | 23 +
vendor/k8s.io/api/rbac/v1/generated.pb.go | 3266 +
vendor/k8s.io/api/rbac/v1/generated.proto | 199 +
vendor/k8s.io/api/rbac/v1/register.go | 58 +
vendor/k8s.io/api/rbac/v1/types.go | 237 +
.../rbac/v1/types_swagger_doc_generated.go | 158 +
.../api/rbac/v1/zz_generated.deepcopy.go | 389 +
vendor/k8s.io/api/rbac/v1alpha1/doc.go | 23 +
.../k8s.io/api/rbac/v1alpha1/generated.pb.go | 3267 +
.../k8s.io/api/rbac/v1alpha1/generated.proto | 208 +
vendor/k8s.io/api/rbac/v1alpha1/register.go | 58 +
vendor/k8s.io/api/rbac/v1alpha1/types.go | 246 +
.../v1alpha1/types_swagger_doc_generated.go | 158 +
.../rbac/v1alpha1/zz_generated.deepcopy.go | 389 +
vendor/k8s.io/api/rbac/v1beta1/doc.go | 24 +
.../k8s.io/api/rbac/v1beta1/generated.pb.go | 3266 +
.../k8s.io/api/rbac/v1beta1/generated.proto | 208 +
vendor/k8s.io/api/rbac/v1beta1/register.go | 58 +
vendor/k8s.io/api/rbac/v1beta1/types.go | 277 +
.../v1beta1/types_swagger_doc_generated.go | 158 +
.../api/rbac/v1beta1/zz_generated.deepcopy.go | 389 +
.../zz_generated.prerelease-lifecycle.go | 217 +
vendor/k8s.io/api/scheduling/v1/doc.go | 23 +
.../k8s.io/api/scheduling/v1/generated.pb.go | 735 +
.../k8s.io/api/scheduling/v1/generated.proto | 75 +
vendor/k8s.io/api/scheduling/v1/register.go | 55 +
vendor/k8s.io/api/scheduling/v1/types.go | 74 +
.../v1/types_swagger_doc_generated.go | 53 +
.../scheduling/v1/zz_generated.deepcopy.go | 90 +
vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 23 +
.../api/scheduling/v1alpha1/generated.pb.go | 735 +
.../api/scheduling/v1alpha1/generated.proto | 76 +
.../api/scheduling/v1alpha1/register.go | 52 +
.../k8s.io/api/scheduling/v1alpha1/types.go | 75 +
.../v1alpha1/types_swagger_doc_generated.go | 53 +
.../v1alpha1/zz_generated.deepcopy.go | 90 +
vendor/k8s.io/api/scheduling/v1beta1/doc.go | 24 +
.../api/scheduling/v1beta1/generated.pb.go | 735 +
.../api/scheduling/v1beta1/generated.proto | 76 +
.../k8s.io/api/scheduling/v1beta1/register.go | 52 +
vendor/k8s.io/api/scheduling/v1beta1/types.go | 83 +
.../v1beta1/types_swagger_doc_generated.go | 53 +
.../v1beta1/zz_generated.deepcopy.go | 90 +
.../zz_generated.prerelease-lifecycle.go | 73 +
vendor/k8s.io/api/settings/v1alpha1/doc.go | 23 +
.../api/settings/v1alpha1/generated.pb.go | 1053 +
.../api/settings/v1alpha1/generated.proto | 75 +
.../k8s.io/api/settings/v1alpha1/register.go | 52 +
vendor/k8s.io/api/settings/v1alpha1/types.go | 70 +
.../v1alpha1/types_swagger_doc_generated.go | 61 +
.../v1alpha1/zz_generated.deepcopy.go | 131 +
vendor/k8s.io/api/storage/v1/doc.go | 22 +
vendor/k8s.io/api/storage/v1/generated.pb.go | 4545 ++
vendor/k8s.io/api/storage/v1/generated.proto | 398 +
vendor/k8s.io/api/storage/v1/register.go | 62 +
vendor/k8s.io/api/storage/v1/types.go | 497 +
.../storage/v1/types_swagger_doc_generated.go | 202 +
.../api/storage/v1/zz_generated.deepcopy.go | 504 +
vendor/k8s.io/api/storage/v1alpha1/doc.go | 22 +
.../api/storage/v1alpha1/generated.pb.go | 2368 +
.../api/storage/v1alpha1/generated.proto | 211 +
.../k8s.io/api/storage/v1alpha1/register.go | 52 +
vendor/k8s.io/api/storage/v1alpha1/types.go | 218 +
.../v1alpha1/types_swagger_doc_generated.go | 115 +
.../storage/v1alpha1/zz_generated.deepcopy.go | 250 +
vendor/k8s.io/api/storage/v1beta1/doc.go | 23 +
.../api/storage/v1beta1/generated.pb.go | 4545 ++
.../api/storage/v1beta1/generated.proto | 400 +
vendor/k8s.io/api/storage/v1beta1/register.go | 62 +
vendor/k8s.io/api/storage/v1beta1/types.go | 520 +
.../v1beta1/types_swagger_doc_generated.go | 202 +
.../storage/v1beta1/zz_generated.deepcopy.go | 504 +
.../zz_generated.prerelease-lifecycle.go | 217 +
vendor/k8s.io/apiextensions-apiserver/LICENSE | 202 +
.../pkg/apis/apiextensions/deepcopy.go | 294 +
.../pkg/apis/apiextensions/doc.go | 21 +
.../pkg/apis/apiextensions/helpers.go | 257 +
.../pkg/apis/apiextensions/register.go | 51 +
.../pkg/apis/apiextensions/types.go | 420 +
.../apis/apiextensions/types_jsonschema.go | 164 +
.../apiextensions/v1/.import-restrictions | 5 +
.../pkg/apis/apiextensions/v1/conversion.go | 198 +
.../pkg/apis/apiextensions/v1/deepcopy.go | 254 +
.../pkg/apis/apiextensions/v1/defaults.go | 61 +
.../pkg/apis/apiextensions/v1/doc.go | 25 +
.../pkg/apis/apiextensions/v1/generated.pb.go | 9066 +++
.../pkg/apis/apiextensions/v1/generated.proto | 649 +
.../pkg/apis/apiextensions/v1/marshal.go | 135 +
.../pkg/apis/apiextensions/v1/register.go | 62 +
.../pkg/apis/apiextensions/v1/types.go | 476 +
.../apis/apiextensions/v1/types_jsonschema.go | 257 +
.../v1/zz_generated.conversion.go | 1278 +
.../apiextensions/v1/zz_generated.deepcopy.go | 668 +
.../apiextensions/v1/zz_generated.defaults.go | 57 +
.../v1beta1/.import-restrictions | 5 +
.../apis/apiextensions/v1beta1/conversion.go | 59 +
.../apis/apiextensions/v1beta1/deepcopy.go | 270 +
.../apis/apiextensions/v1beta1/defaults.go | 82 +
.../pkg/apis/apiextensions/v1beta1/doc.go | 26 +
.../apiextensions/v1beta1/generated.pb.go | 9103 +++
.../apiextensions/v1beta1/generated.proto | 686 +
.../pkg/apis/apiextensions/v1beta1/marshal.go | 135 +
.../apis/apiextensions/v1beta1/register.go | 62 +
.../pkg/apis/apiextensions/v1beta1/types.go | 522 +
.../apiextensions/v1beta1/types_jsonschema.go | 257 +
.../v1beta1/zz_generated.conversion.go | 1331 +
.../v1beta1/zz_generated.deepcopy.go | 667 +
.../v1beta1/zz_generated.defaults.go | 55 +
.../zz_generated.prerelease-lifecycle.go | 97 +
.../apiextensions/zz_generated.deepcopy.go | 559 +
.../client/clientset/clientset/clientset.go | 111 +
.../pkg/client/clientset/clientset/doc.go | 20 +
.../client/clientset/clientset/scheme/doc.go | 20 +
.../clientset/clientset/scheme/register.go | 58 +
.../apiextensions/v1/apiextensions_client.go | 89 +
.../v1/customresourcedefinition.go | 184 +
.../clientset/typed/apiextensions/v1/doc.go | 20 +
.../apiextensions/v1/generated_expansion.go | 21 +
.../v1beta1/apiextensions_client.go | 89 +
.../v1beta1/customresourcedefinition.go | 184 +
.../typed/apiextensions/v1beta1/doc.go | 20 +
.../v1beta1/generated_expansion.go | 21 +
vendor/k8s.io/apimachinery/LICENSE | 202 +
.../apimachinery/pkg/api/equality/semantic.go | 49 +
.../k8s.io/apimachinery/pkg/api/errors/OWNERS | 23 +
.../k8s.io/apimachinery/pkg/api/errors/doc.go | 18 +
.../apimachinery/pkg/api/errors/errors.go | 697 +
.../k8s.io/apimachinery/pkg/api/meta/OWNERS | 21 +
.../apimachinery/pkg/api/meta/conditions.go | 101 +
.../k8s.io/apimachinery/pkg/api/meta/doc.go | 19 +
.../apimachinery/pkg/api/meta/errors.go | 121 +
.../pkg/api/meta/firsthit_restmapper.go | 97 +
.../k8s.io/apimachinery/pkg/api/meta/help.go | 264 +
.../apimachinery/pkg/api/meta/interfaces.go | 134 +
.../k8s.io/apimachinery/pkg/api/meta/lazy.go | 104 +
.../k8s.io/apimachinery/pkg/api/meta/meta.go | 648 +
.../pkg/api/meta/multirestmapper.go | 210 +
.../apimachinery/pkg/api/meta/priority.go | 222 +
.../apimachinery/pkg/api/meta/restmapper.go | 518 +
.../apimachinery/pkg/api/resource/OWNERS | 13 +
.../apimachinery/pkg/api/resource/amount.go | 299 +
.../pkg/api/resource/generated.pb.go | 89 +
.../pkg/api/resource/generated.proto | 88 +
.../apimachinery/pkg/api/resource/math.go | 310 +
.../apimachinery/pkg/api/resource/quantity.go | 733 +
.../pkg/api/resource/quantity_proto.go | 288 +
.../pkg/api/resource/scale_int.go | 95 +
.../apimachinery/pkg/api/resource/suffix.go | 198 +
.../pkg/api/resource/zz_generated.deepcopy.go | 27 +
.../pkg/apis/meta/internalversion/doc.go | 20 +
.../pkg/apis/meta/internalversion/register.go | 89 +
.../apis/meta/internalversion/scheme/doc.go | 17 +
.../meta/internalversion/scheme/register.go | 39 +
.../pkg/apis/meta/internalversion/types.go | 80 +
.../zz_generated.conversion.go | 145 +
.../internalversion/zz_generated.deepcopy.go | 96 +
.../apimachinery/pkg/apis/meta/v1/OWNERS | 31 +
.../pkg/apis/meta/v1/controller_ref.go | 65 +
.../pkg/apis/meta/v1/conversion.go | 355 +
.../apimachinery/pkg/apis/meta/v1/deepcopy.go | 46 +
.../apimachinery/pkg/apis/meta/v1/doc.go | 24 +
.../apimachinery/pkg/apis/meta/v1/duration.go | 65 +
.../pkg/apis/meta/v1/generated.pb.go | 11477 +++
.../pkg/apis/meta/v1/generated.proto | 1103 +
.../pkg/apis/meta/v1/group_version.go | 148 +
.../apimachinery/pkg/apis/meta/v1/helpers.go | 282 +
.../apimachinery/pkg/apis/meta/v1/labels.go | 55 +
.../apimachinery/pkg/apis/meta/v1/meta.go | 180 +
.../pkg/apis/meta/v1/micro_time.go | 196 +
.../pkg/apis/meta/v1/micro_time_proto.go | 80 +
.../apimachinery/pkg/apis/meta/v1/register.go | 108 +
.../apimachinery/pkg/apis/meta/v1/time.go | 197 +
.../pkg/apis/meta/v1/time_proto.go | 100 +
.../apimachinery/pkg/apis/meta/v1/types.go | 1413 +
.../meta/v1/types_swagger_doc_generated.go | 457 +
.../pkg/apis/meta/v1/unstructured/helpers.go | 508 +
.../apis/meta/v1/unstructured/unstructured.go | 496 +
.../meta/v1/unstructured/unstructured_list.go | 210 +
.../v1/unstructured/zz_generated.deepcopy.go | 55 +
.../apimachinery/pkg/apis/meta/v1/watch.go | 89 +
.../apis/meta/v1/zz_generated.conversion.go | 535 +
.../pkg/apis/meta/v1/zz_generated.deepcopy.go | 1190 +
.../pkg/apis/meta/v1/zz_generated.defaults.go | 32 +
.../pkg/apis/meta/v1beta1/conversion.go | 46 +
.../pkg/apis/meta/v1beta1/deepcopy.go | 17 +
.../apimachinery/pkg/apis/meta/v1beta1/doc.go | 23 +
.../pkg/apis/meta/v1beta1/generated.pb.go | 415 +
.../pkg/apis/meta/v1beta1/generated.proto | 41 +
.../pkg/apis/meta/v1beta1/register.go | 62 +
.../pkg/apis/meta/v1beta1/types.go | 84 +
.../v1beta1/types_swagger_doc_generated.go | 40 +
.../meta/v1beta1/zz_generated.deepcopy.go | 59 +
.../meta/v1beta1/zz_generated.defaults.go | 32 +
.../apimachinery/pkg/conversion/converter.go | 817 +
.../apimachinery/pkg/conversion/deep_equal.go | 36 +
.../k8s.io/apimachinery/pkg/conversion/doc.go | 24 +
.../apimachinery/pkg/conversion/helper.go | 39 +
.../pkg/conversion/queryparams/convert.go | 194 +
.../pkg/conversion/queryparams/doc.go | 19 +
vendor/k8s.io/apimachinery/pkg/fields/doc.go | 19 +
.../k8s.io/apimachinery/pkg/fields/fields.go | 62 +
.../apimachinery/pkg/fields/requirements.go | 30 +
.../apimachinery/pkg/fields/selector.go | 478 +
vendor/k8s.io/apimachinery/pkg/labels/doc.go | 19 +
.../k8s.io/apimachinery/pkg/labels/labels.go | 189 +
.../apimachinery/pkg/labels/selector.go | 923 +
.../pkg/labels/zz_generated.deepcopy.go | 42 +
.../k8s.io/apimachinery/pkg/runtime/codec.go | 396 +
.../apimachinery/pkg/runtime/codec_check.go | 56 +
.../apimachinery/pkg/runtime/conversion.go | 196 +
.../apimachinery/pkg/runtime/converter.go | 707 +
vendor/k8s.io/apimachinery/pkg/runtime/doc.go | 51 +
.../apimachinery/pkg/runtime/embedded.go | 149 +
.../k8s.io/apimachinery/pkg/runtime/error.go | 151 +
.../apimachinery/pkg/runtime/extension.go | 51 +
.../apimachinery/pkg/runtime/generated.pb.go | 855 +
.../apimachinery/pkg/runtime/generated.proto | 127 +
.../k8s.io/apimachinery/pkg/runtime/helper.go | 259 +
.../apimachinery/pkg/runtime/interfaces.go | 344 +
.../k8s.io/apimachinery/pkg/runtime/mapper.go | 98 +
.../apimachinery/pkg/runtime/negotiate.go | 146 +
.../apimachinery/pkg/runtime/register.go | 31 +
.../pkg/runtime/schema/generated.pb.go | 59 +
.../pkg/runtime/schema/generated.proto | 26 +
.../pkg/runtime/schema/group_version.go | 305 +
.../pkg/runtime/schema/interfaces.go | 40 +
.../k8s.io/apimachinery/pkg/runtime/scheme.go | 728 +
.../pkg/runtime/scheme_builder.go | 48 +
.../pkg/runtime/serializer/codec_factory.go | 324 +
.../pkg/runtime/serializer/json/json.go | 388 +
.../pkg/runtime/serializer/json/meta.go | 63 +
.../runtime/serializer/negotiated_codec.go | 43 +
.../pkg/runtime/serializer/protobuf/doc.go | 18 +
.../runtime/serializer/protobuf/protobuf.go | 472 +
.../serializer/recognizer/recognizer.go | 127 +
.../runtime/serializer/streaming/streaming.go | 137 +
.../serializer/versioning/versioning.go | 250 +
.../pkg/runtime/swagger_doc_generator.go | 262 +
.../k8s.io/apimachinery/pkg/runtime/types.go | 126 +
.../apimachinery/pkg/runtime/types_proto.go | 89 +
.../pkg/runtime/zz_generated.deepcopy.go | 75 +
.../apimachinery/pkg/selection/operator.go | 33 +
vendor/k8s.io/apimachinery/pkg/types/doc.go | 18 +
.../apimachinery/pkg/types/namespacedname.go | 43 +
.../k8s.io/apimachinery/pkg/types/nodename.go | 43 +
vendor/k8s.io/apimachinery/pkg/types/patch.go | 29 +
vendor/k8s.io/apimachinery/pkg/types/uid.go | 22 +
.../apimachinery/pkg/util/cache/expiring.go | 192 +
.../pkg/util/cache/lruexpirecache.go | 102 +
.../apimachinery/pkg/util/clock/clock.go | 393 +
.../k8s.io/apimachinery/pkg/util/diff/diff.go | 157 +
.../pkg/util/duration/duration.go | 93 +
.../apimachinery/pkg/util/errors/doc.go | 18 +
.../apimachinery/pkg/util/errors/errors.go | 249 +
.../apimachinery/pkg/util/framer/framer.go | 167 +
.../apimachinery/pkg/util/httpstream/doc.go | 19 +
.../pkg/util/httpstream/httpstream.go | 157 +
.../pkg/util/httpstream/spdy/connection.go | 145 +
.../pkg/util/httpstream/spdy/roundtripper.go | 332 +
.../pkg/util/httpstream/spdy/upgrade.go | 107 +
.../pkg/util/intstr/generated.pb.go | 372 +
.../pkg/util/intstr/generated.proto | 43 +
.../apimachinery/pkg/util/intstr/intstr.go | 185 +
.../k8s.io/apimachinery/pkg/util/json/json.go | 156 +
.../apimachinery/pkg/util/mergepatch/OWNERS | 7 +
.../pkg/util/mergepatch/errors.go | 102 +
.../apimachinery/pkg/util/mergepatch/util.go | 133 +
.../pkg/util/naming/from_stack.go | 93 +
.../k8s.io/apimachinery/pkg/util/net/http.go | 773 +
.../apimachinery/pkg/util/net/interface.go | 457 +
.../apimachinery/pkg/util/net/port_range.go | 149 +
.../apimachinery/pkg/util/net/port_split.go | 77 +
.../k8s.io/apimachinery/pkg/util/net/util.go | 56 +
.../k8s.io/apimachinery/pkg/util/rand/rand.go | 127 +
.../pkg/util/remotecommand/constants.go | 53 +
.../apimachinery/pkg/util/runtime/runtime.go | 173 +
.../k8s.io/apimachinery/pkg/util/sets/byte.go | 205 +
.../k8s.io/apimachinery/pkg/util/sets/doc.go | 20 +
.../apimachinery/pkg/util/sets/empty.go | 23 +
.../k8s.io/apimachinery/pkg/util/sets/int.go | 205 +
.../apimachinery/pkg/util/sets/int32.go | 205 +
.../apimachinery/pkg/util/sets/int64.go | 205 +
.../apimachinery/pkg/util/sets/string.go | 205 +
.../pkg/util/strategicpatch/OWNERS | 8 +
.../pkg/util/strategicpatch/errors.go | 49 +
.../pkg/util/strategicpatch/meta.go | 194 +
.../pkg/util/strategicpatch/patch.go | 2172 +
.../pkg/util/strategicpatch/types.go | 193 +
.../k8s.io/apimachinery/pkg/util/uuid/uuid.go | 27 +
.../pkg/util/validation/field/errors.go | 272 +
.../pkg/util/validation/field/path.go | 91 +
.../pkg/util/validation/validation.go | 503 +
.../k8s.io/apimachinery/pkg/util/wait/doc.go | 19 +
.../k8s.io/apimachinery/pkg/util/wait/wait.go | 606 +
.../apimachinery/pkg/util/yaml/decoder.go | 348 +
vendor/k8s.io/apimachinery/pkg/version/doc.go | 20 +
.../apimachinery/pkg/version/helpers.go | 88 +
.../k8s.io/apimachinery/pkg/version/types.go | 37 +
vendor/k8s.io/apimachinery/pkg/watch/doc.go | 19 +
.../k8s.io/apimachinery/pkg/watch/filter.go | 105 +
vendor/k8s.io/apimachinery/pkg/watch/mux.go | 260 +
.../apimachinery/pkg/watch/streamwatcher.go | 132 +
vendor/k8s.io/apimachinery/pkg/watch/watch.go | 324 +
.../pkg/watch/zz_generated.deepcopy.go | 40 +
.../third_party/forked/golang/json/OWNERS | 7 +
.../third_party/forked/golang/json/fields.go | 513 +
.../third_party/forked/golang/netutil/addr.go | 27 +
.../forked/golang/reflect/deep_equal.go | 388 +
vendor/k8s.io/apiserver/LICENSE | 202 +
.../apiserver/pkg/authentication/user/doc.go | 19 +
.../apiserver/pkg/authentication/user/user.go | 83 +
vendor/k8s.io/autoscaler/LICENSE | 202 +
.../apis/autoscaling.k8s.io/v1beta2/doc.go | 21 +
.../autoscaling.k8s.io/v1beta2/register.go | 58 +
.../apis/autoscaling.k8s.io/v1beta2/types.go | 339 +
.../v1beta2/zz_generated.deepcopy.go | 432 +
vendor/k8s.io/client-go/LICENSE | 202 +
.../client-go/discovery/discovery_client.go | 516 +
vendor/k8s.io/client-go/discovery/doc.go | 19 +
.../client-go/discovery/fake/discovery.go | 160 +
vendor/k8s.io/client-go/discovery/helper.go | 125 +
vendor/k8s.io/client-go/dynamic/interface.go | 61 +
vendor/k8s.io/client-go/dynamic/scheme.go | 108 +
vendor/k8s.io/client-go/dynamic/simple.go | 327 +
.../k8s.io/client-go/kubernetes/clientset.go | 657 +
vendor/k8s.io/client-go/kubernetes/doc.go | 20 +
vendor/k8s.io/client-go/kubernetes/import.go | 19 +
.../k8s.io/client-go/kubernetes/scheme/doc.go | 20 +
.../client-go/kubernetes/scheme/register.go | 136 +
.../v1/admissionregistration_client.go | 94 +
.../typed/admissionregistration/v1/doc.go | 20 +
.../v1/generated_expansion.go | 23 +
.../v1/mutatingwebhookconfiguration.go | 168 +
.../v1/validatingwebhookconfiguration.go | 168 +
.../v1beta1/admissionregistration_client.go | 94 +
.../admissionregistration/v1beta1/doc.go | 20 +
.../v1beta1/generated_expansion.go | 23 +
.../v1beta1/mutatingwebhookconfiguration.go | 168 +
.../v1beta1/validatingwebhookconfiguration.go | 168 +
.../kubernetes/typed/apps/v1/apps_client.go | 109 +
.../typed/apps/v1/controllerrevision.go | 178 +
.../kubernetes/typed/apps/v1/daemonset.go | 195 +
.../kubernetes/typed/apps/v1/deployment.go | 228 +
.../client-go/kubernetes/typed/apps/v1/doc.go | 20 +
.../typed/apps/v1/generated_expansion.go | 29 +
.../kubernetes/typed/apps/v1/replicaset.go | 228 +
.../kubernetes/typed/apps/v1/statefulset.go | 228 +
.../typed/apps/v1beta1/apps_client.go | 99 +
.../typed/apps/v1beta1/controllerrevision.go | 178 +
.../typed/apps/v1beta1/deployment.go | 195 +
.../kubernetes/typed/apps/v1beta1/doc.go | 20 +
.../typed/apps/v1beta1/generated_expansion.go | 25 +
.../typed/apps/v1beta1/statefulset.go | 195 +
.../typed/apps/v1beta2/apps_client.go | 109 +
.../typed/apps/v1beta2/controllerrevision.go | 178 +
.../typed/apps/v1beta2/daemonset.go | 195 +
.../typed/apps/v1beta2/deployment.go | 195 +
.../kubernetes/typed/apps/v1beta2/doc.go | 20 +
.../typed/apps/v1beta2/generated_expansion.go | 29 +
.../typed/apps/v1beta2/replicaset.go | 195 +
.../typed/apps/v1beta2/statefulset.go | 227 +
.../v1/authentication_client.go | 89 +
.../kubernetes/typed/authentication/v1/doc.go | 20 +
.../authentication/v1/generated_expansion.go | 21 +
.../typed/authentication/v1/tokenreview.go | 64 +
.../v1beta1/authentication_client.go | 89 +
.../typed/authentication/v1beta1/doc.go | 20 +
.../v1beta1/generated_expansion.go | 21 +
.../authentication/v1beta1/tokenreview.go | 64 +
.../authorization/v1/authorization_client.go | 104 +
.../kubernetes/typed/authorization/v1/doc.go | 20 +
.../authorization/v1/generated_expansion.go | 27 +
.../v1/localsubjectaccessreview.go | 67 +
.../v1/selfsubjectaccessreview.go | 64 +
.../v1/selfsubjectrulesreview.go | 64 +
.../authorization/v1/subjectaccessreview.go | 64 +
.../v1beta1/authorization_client.go | 104 +
.../typed/authorization/v1beta1/doc.go | 20 +
.../v1beta1/generated_expansion.go | 27 +
.../v1beta1/localsubjectaccessreview.go | 67 +
.../v1beta1/selfsubjectaccessreview.go | 64 +
.../v1beta1/selfsubjectrulesreview.go | 64 +
.../v1beta1/subjectaccessreview.go | 64 +
.../autoscaling/v1/autoscaling_client.go | 89 +
.../kubernetes/typed/autoscaling/v1/doc.go | 20 +
.../autoscaling/v1/generated_expansion.go | 21 +
.../autoscaling/v1/horizontalpodautoscaler.go | 195 +
.../autoscaling/v2beta1/autoscaling_client.go | 89 +
.../typed/autoscaling/v2beta1/doc.go | 20 +
.../v2beta1/generated_expansion.go | 21 +
.../v2beta1/horizontalpodautoscaler.go | 195 +
.../autoscaling/v2beta2/autoscaling_client.go | 89 +
.../typed/autoscaling/v2beta2/doc.go | 20 +
.../v2beta2/generated_expansion.go | 21 +
.../v2beta2/horizontalpodautoscaler.go | 195 +
.../kubernetes/typed/batch/v1/batch_client.go | 89 +
.../kubernetes/typed/batch/v1/doc.go | 20 +
.../typed/batch/v1/generated_expansion.go | 21 +
.../kubernetes/typed/batch/v1/job.go | 195 +
.../typed/batch/v1beta1/batch_client.go | 89 +
.../kubernetes/typed/batch/v1beta1/cronjob.go | 195 +
.../kubernetes/typed/batch/v1beta1/doc.go | 20 +
.../batch/v1beta1/generated_expansion.go | 21 +
.../typed/batch/v2alpha1/batch_client.go | 89 +
.../typed/batch/v2alpha1/cronjob.go | 195 +
.../kubernetes/typed/batch/v2alpha1/doc.go | 20 +
.../batch/v2alpha1/generated_expansion.go | 21 +
.../certificates/v1/certificates_client.go | 89 +
.../v1/certificatesigningrequest.go | 200 +
.../kubernetes/typed/certificates/v1/doc.go | 20 +
.../certificates/v1/generated_expansion.go | 21 +
.../v1beta1/certificates_client.go | 89 +
.../v1beta1/certificatesigningrequest.go | 184 +
.../certificatesigningrequest_expansion.go | 42 +
.../typed/certificates/v1beta1/doc.go | 20 +
.../v1beta1/generated_expansion.go | 19 +
.../coordination/v1/coordination_client.go | 89 +
.../kubernetes/typed/coordination/v1/doc.go | 20 +
.../coordination/v1/generated_expansion.go | 21 +
.../kubernetes/typed/coordination/v1/lease.go | 178 +
.../v1beta1/coordination_client.go | 89 +
.../typed/coordination/v1beta1/doc.go | 20 +
.../v1beta1/generated_expansion.go | 21 +
.../typed/coordination/v1beta1/lease.go | 178 +
.../typed/core/v1/componentstatus.go | 168 +
.../kubernetes/typed/core/v1/configmap.go | 178 +
.../kubernetes/typed/core/v1/core_client.go | 164 +
.../client-go/kubernetes/typed/core/v1/doc.go | 20 +
.../kubernetes/typed/core/v1/endpoints.go | 178 +
.../kubernetes/typed/core/v1/event.go | 178 +
.../typed/core/v1/event_expansion.go | 165 +
.../typed/core/v1/generated_expansion.go | 41 +
.../kubernetes/typed/core/v1/limitrange.go | 178 +
.../kubernetes/typed/core/v1/namespace.go | 168 +
.../typed/core/v1/namespace_expansion.go | 37 +
.../kubernetes/typed/core/v1/node.go | 184 +
.../typed/core/v1/node_expansion.go | 45 +
.../typed/core/v1/persistentvolume.go | 184 +
.../typed/core/v1/persistentvolumeclaim.go | 195 +
.../client-go/kubernetes/typed/core/v1/pod.go | 227 +
.../kubernetes/typed/core/v1/pod_expansion.go | 64 +
.../kubernetes/typed/core/v1/podtemplate.go | 178 +
.../typed/core/v1/replicationcontroller.go | 228 +
.../kubernetes/typed/core/v1/resourcequota.go | 195 +
.../kubernetes/typed/core/v1/secret.go | 178 +
.../kubernetes/typed/core/v1/service.go | 178 +
.../typed/core/v1/service_expansion.go | 41 +
.../typed/core/v1/serviceaccount.go | 196 +
.../discovery/v1alpha1/discovery_client.go | 89 +
.../typed/discovery/v1alpha1/doc.go | 20 +
.../typed/discovery/v1alpha1/endpointslice.go | 178 +
.../discovery/v1alpha1/generated_expansion.go | 21 +
.../discovery/v1beta1/discovery_client.go | 89 +
.../kubernetes/typed/discovery/v1beta1/doc.go | 20 +
.../typed/discovery/v1beta1/endpointslice.go | 178 +
.../discovery/v1beta1/generated_expansion.go | 21 +
.../kubernetes/typed/events/v1/doc.go | 20 +
.../kubernetes/typed/events/v1/event.go | 178 +
.../typed/events/v1/events_client.go | 89 +
.../typed/events/v1/generated_expansion.go | 21 +
.../kubernetes/typed/events/v1beta1/doc.go | 20 +
.../kubernetes/typed/events/v1beta1/event.go | 178 +
.../typed/events/v1beta1/event_expansion.go | 99 +
.../typed/events/v1beta1/events_client.go | 89 +
.../events/v1beta1/generated_expansion.go | 19 +
.../typed/extensions/v1beta1/daemonset.go | 195 +
.../typed/extensions/v1beta1/deployment.go | 227 +
.../v1beta1/deployment_expansion.go | 35 +
.../typed/extensions/v1beta1/doc.go | 20 +
.../extensions/v1beta1/extensions_client.go | 114 +
.../extensions/v1beta1/generated_expansion.go | 29 +
.../typed/extensions/v1beta1/ingress.go | 195 +
.../typed/extensions/v1beta1/networkpolicy.go | 178 +
.../extensions/v1beta1/podsecuritypolicy.go | 168 +
.../typed/extensions/v1beta1/replicaset.go | 227 +
.../typed/flowcontrol/v1alpha1/doc.go | 20 +
.../v1alpha1/flowcontrol_client.go | 94 +
.../typed/flowcontrol/v1alpha1/flowschema.go | 184 +
.../v1alpha1/generated_expansion.go | 23 +
.../v1alpha1/prioritylevelconfiguration.go | 184 +
.../kubernetes/typed/networking/v1/doc.go | 20 +
.../networking/v1/generated_expansion.go | 25 +
.../kubernetes/typed/networking/v1/ingress.go | 195 +
.../typed/networking/v1/ingressclass.go | 168 +
.../typed/networking/v1/networking_client.go | 99 +
.../typed/networking/v1/networkpolicy.go | 178 +
.../typed/networking/v1beta1/doc.go | 20 +
.../networking/v1beta1/generated_expansion.go | 23 +
.../typed/networking/v1beta1/ingress.go | 195 +
.../typed/networking/v1beta1/ingressclass.go | 168 +
.../networking/v1beta1/networking_client.go | 94 +
.../kubernetes/typed/node/v1alpha1/doc.go | 20 +
.../node/v1alpha1/generated_expansion.go | 21 +
.../typed/node/v1alpha1/node_client.go | 89 +
.../typed/node/v1alpha1/runtimeclass.go | 168 +
.../kubernetes/typed/node/v1beta1/doc.go | 20 +
.../typed/node/v1beta1/generated_expansion.go | 21 +
.../typed/node/v1beta1/node_client.go | 89 +
.../typed/node/v1beta1/runtimeclass.go | 168 +
.../kubernetes/typed/policy/v1beta1/doc.go | 20 +
.../typed/policy/v1beta1/eviction.go | 48 +
.../policy/v1beta1/eviction_expansion.go | 40 +
.../policy/v1beta1/generated_expansion.go | 23 +
.../policy/v1beta1/poddisruptionbudget.go | 195 +
.../typed/policy/v1beta1/podsecuritypolicy.go | 168 +
.../typed/policy/v1beta1/policy_client.go | 99 +
.../kubernetes/typed/rbac/v1/clusterrole.go | 168 +
.../typed/rbac/v1/clusterrolebinding.go | 168 +
.../client-go/kubernetes/typed/rbac/v1/doc.go | 20 +
.../typed/rbac/v1/generated_expansion.go | 27 +
.../kubernetes/typed/rbac/v1/rbac_client.go | 104 +
.../kubernetes/typed/rbac/v1/role.go | 178 +
.../kubernetes/typed/rbac/v1/rolebinding.go | 178 +
.../typed/rbac/v1alpha1/clusterrole.go | 168 +
.../typed/rbac/v1alpha1/clusterrolebinding.go | 168 +
.../kubernetes/typed/rbac/v1alpha1/doc.go | 20 +
.../rbac/v1alpha1/generated_expansion.go | 27 +
.../typed/rbac/v1alpha1/rbac_client.go | 104 +
.../kubernetes/typed/rbac/v1alpha1/role.go | 178 +
.../typed/rbac/v1alpha1/rolebinding.go | 178 +
.../typed/rbac/v1beta1/clusterrole.go | 168 +
.../typed/rbac/v1beta1/clusterrolebinding.go | 168 +
.../kubernetes/typed/rbac/v1beta1/doc.go | 20 +
.../typed/rbac/v1beta1/generated_expansion.go | 27 +
.../typed/rbac/v1beta1/rbac_client.go | 104 +
.../kubernetes/typed/rbac/v1beta1/role.go | 178 +
.../typed/rbac/v1beta1/rolebinding.go | 178 +
.../kubernetes/typed/scheduling/v1/doc.go | 20 +
.../scheduling/v1/generated_expansion.go | 21 +
.../typed/scheduling/v1/priorityclass.go | 168 +
.../typed/scheduling/v1/scheduling_client.go | 89 +
.../typed/scheduling/v1alpha1/doc.go | 20 +
.../v1alpha1/generated_expansion.go | 21 +
.../scheduling/v1alpha1/priorityclass.go | 168 +
.../scheduling/v1alpha1/scheduling_client.go | 89 +
.../typed/scheduling/v1beta1/doc.go | 20 +
.../scheduling/v1beta1/generated_expansion.go | 21 +
.../typed/scheduling/v1beta1/priorityclass.go | 168 +
.../scheduling/v1beta1/scheduling_client.go | 89 +
.../kubernetes/typed/settings/v1alpha1/doc.go | 20 +
.../settings/v1alpha1/generated_expansion.go | 21 +
.../typed/settings/v1alpha1/podpreset.go | 178 +
.../settings/v1alpha1/settings_client.go | 89 +
.../kubernetes/typed/storage/v1/csidriver.go | 168 +
.../kubernetes/typed/storage/v1/csinode.go | 168 +
.../kubernetes/typed/storage/v1/doc.go | 20 +
.../typed/storage/v1/generated_expansion.go | 27 +
.../typed/storage/v1/storage_client.go | 104 +
.../typed/storage/v1/storageclass.go | 168 +
.../typed/storage/v1/volumeattachment.go | 184 +
.../storage/v1alpha1/csistoragecapacity.go | 178 +
.../kubernetes/typed/storage/v1alpha1/doc.go | 20 +
.../storage/v1alpha1/generated_expansion.go | 23 +
.../typed/storage/v1alpha1/storage_client.go | 94 +
.../storage/v1alpha1/volumeattachment.go | 184 +
.../typed/storage/v1beta1/csidriver.go | 168 +
.../typed/storage/v1beta1/csinode.go | 168 +
.../kubernetes/typed/storage/v1beta1/doc.go | 20 +
.../storage/v1beta1/generated_expansion.go | 27 +
.../typed/storage/v1beta1/storage_client.go | 104 +
.../typed/storage/v1beta1/storageclass.go | 168 +
.../typed/storage/v1beta1/volumeattachment.go | 184 +
vendor/k8s.io/client-go/metadata/interface.go | 49 +
vendor/k8s.io/client-go/metadata/metadata.go | 307 +
.../pkg/apis/clientauthentication/OWNERS | 9 +
.../pkg/apis/clientauthentication/doc.go | 20 +
.../pkg/apis/clientauthentication/register.go | 50 +
.../pkg/apis/clientauthentication/types.go | 77 +
.../apis/clientauthentication/v1alpha1/doc.go | 24 +
.../clientauthentication/v1alpha1/register.go | 55 +
.../clientauthentication/v1alpha1/types.go | 78 +
.../v1alpha1/zz_generated.conversion.go | 176 +
.../v1alpha1/zz_generated.deepcopy.go | 128 +
.../v1alpha1/zz_generated.defaults.go | 32 +
.../v1beta1/conversion.go | 26 +
.../apis/clientauthentication/v1beta1/doc.go | 24 +
.../clientauthentication/v1beta1/register.go | 55 +
.../clientauthentication/v1beta1/types.go | 59 +
.../v1beta1/zz_generated.conversion.go | 137 +
.../v1beta1/zz_generated.deepcopy.go | 92 +
.../v1beta1/zz_generated.defaults.go | 32 +
.../zz_generated.deepcopy.go | 128 +
.../client-go/pkg/version/.gitattributes | 1 +
vendor/k8s.io/client-go/pkg/version/base.go | 63 +
vendor/k8s.io/client-go/pkg/version/def.bzl | 38 +
vendor/k8s.io/client-go/pkg/version/doc.go | 21 +
.../k8s.io/client-go/pkg/version/version.go | 42 +
.../plugin/pkg/client/auth/exec/exec.go | 496 +
.../plugin/pkg/client/auth/exec/metrics.go | 60 +
vendor/k8s.io/client-go/rest/OWNERS | 25 +
vendor/k8s.io/client-go/rest/client.go | 201 +
vendor/k8s.io/client-go/rest/config.go | 629 +
vendor/k8s.io/client-go/rest/plugin.go | 73 +
vendor/k8s.io/client-go/rest/request.go | 1362 +
vendor/k8s.io/client-go/rest/transport.go | 121 +
vendor/k8s.io/client-go/rest/url_utils.go | 97 +
vendor/k8s.io/client-go/rest/urlbackoff.go | 107 +
vendor/k8s.io/client-go/rest/warnings.go | 144 +
vendor/k8s.io/client-go/rest/watch/decoder.go | 72 +
vendor/k8s.io/client-go/rest/watch/encoder.go | 56 +
.../client-go/rest/zz_generated.deepcopy.go | 57 +
.../restmapper/category_expansion.go | 119 +
.../k8s.io/client-go/restmapper/discovery.go | 338 +
.../k8s.io/client-go/restmapper/shortcut.go | 172 +
vendor/k8s.io/client-go/testing/actions.go | 681 +
vendor/k8s.io/client-go/testing/fake.go | 216 +
vendor/k8s.io/client-go/testing/fixture.go | 564 +
vendor/k8s.io/client-go/tools/auth/OWNERS | 9 +
.../k8s.io/client-go/tools/auth/clientauth.go | 126 +
vendor/k8s.io/client-go/tools/cache/OWNERS | 43 +
.../client-go/tools/cache/controller.go | 416 +
.../client-go/tools/cache/delta_fifo.go | 737 +
vendor/k8s.io/client-go/tools/cache/doc.go | 24 +
.../client-go/tools/cache/expiration_cache.go | 215 +
.../tools/cache/expiration_cache_fakes.go | 57 +
.../tools/cache/fake_custom_store.go | 102 +
vendor/k8s.io/client-go/tools/cache/fifo.go | 376 +
vendor/k8s.io/client-go/tools/cache/heap.go | 325 +
vendor/k8s.io/client-go/tools/cache/index.go | 101 +
.../k8s.io/client-go/tools/cache/listers.go | 183 +
.../k8s.io/client-go/tools/cache/listwatch.go | 112 +
.../client-go/tools/cache/mutation_cache.go | 262 +
.../tools/cache/mutation_detector.go | 166 +
.../k8s.io/client-go/tools/cache/reflector.go | 595 +
.../tools/cache/reflector_metrics.go | 89 +
.../client-go/tools/cache/shared_informer.go | 814 +
vendor/k8s.io/client-go/tools/cache/store.go | 266 +
.../tools/cache/thread_safe_store.go | 324 +
.../client-go/tools/cache/undelta_store.go | 89 +
.../client-go/tools/clientcmd/api/doc.go | 19 +
.../client-go/tools/clientcmd/api/helpers.go | 191 +
.../tools/clientcmd/api/latest/latest.go | 61 +
.../client-go/tools/clientcmd/api/register.go | 46 +
.../client-go/tools/clientcmd/api/types.go | 286 +
.../tools/clientcmd/api/v1/conversion.go | 174 +
.../client-go/tools/clientcmd/api/v1/doc.go | 20 +
.../tools/clientcmd/api/v1/register.go | 56 +
.../client-go/tools/clientcmd/api/v1/types.go | 224 +
.../api/v1/zz_generated.conversion.go | 430 +
.../clientcmd/api/v1/zz_generated.deepcopy.go | 348 +
.../clientcmd/api/zz_generated.deepcopy.go | 324 +
.../client-go/tools/clientcmd/auth_loaders.go | 111 +
.../tools/clientcmd/client_config.go | 628 +
.../client-go/tools/clientcmd/config.go | 501 +
.../k8s.io/client-go/tools/clientcmd/doc.go | 37 +
.../k8s.io/client-go/tools/clientcmd/flag.go | 49 +
.../client-go/tools/clientcmd/helpers.go | 50 +
.../client-go/tools/clientcmd/loader.go | 649 +
.../tools/clientcmd/merged_client_builder.go | 172 +
.../client-go/tools/clientcmd/overrides.go | 251 +
.../client-go/tools/clientcmd/validation.go | 352 +
.../client-go/tools/leaderelection/OWNERS | 13 +
.../tools/leaderelection/healthzadaptor.go | 69 +
.../tools/leaderelection/leaderelection.go | 394 +
.../client-go/tools/leaderelection/metrics.go | 109 +
.../resourcelock/configmaplock.go | 121 +
.../resourcelock/endpointslock.go | 116 +
.../leaderelection/resourcelock/interface.go | 142 +
.../leaderelection/resourcelock/leaselock.go | 135 +
.../leaderelection/resourcelock/multilock.go | 104 +
vendor/k8s.io/client-go/tools/metrics/OWNERS | 6 +
.../k8s.io/client-go/tools/metrics/metrics.go | 107 +
vendor/k8s.io/client-go/tools/pager/pager.go | 247 +
.../k8s.io/client-go/tools/portforward/doc.go | 19 +
.../tools/portforward/portforward.go | 429 +
vendor/k8s.io/client-go/tools/record/OWNERS | 28 +
vendor/k8s.io/client-go/tools/record/doc.go | 19 +
vendor/k8s.io/client-go/tools/record/event.go | 380 +
.../client-go/tools/record/events_cache.go | 511 +
vendor/k8s.io/client-go/tools/record/fake.go | 54 +
.../client-go/tools/record/util/util.go | 44 +
.../k8s.io/client-go/tools/reference/ref.go | 109 +
.../client-go/tools/remotecommand/doc.go | 20 +
.../tools/remotecommand/errorstream.go | 55 +
.../client-go/tools/remotecommand/reader.go | 41 +
.../tools/remotecommand/remotecommand.go | 142 +
.../client-go/tools/remotecommand/resize.go | 33 +
.../client-go/tools/remotecommand/v1.go | 160 +
.../client-go/tools/remotecommand/v2.go | 195 +
.../client-go/tools/remotecommand/v3.go | 111 +
.../client-go/tools/remotecommand/v4.go | 119 +
vendor/k8s.io/client-go/transport/OWNERS | 9 +
vendor/k8s.io/client-go/transport/cache.go | 158 +
.../client-go/transport/cert_rotation.go | 176 +
vendor/k8s.io/client-go/transport/config.go | 145 +
.../client-go/transport/round_trippers.go | 570 +
.../k8s.io/client-go/transport/spdy/spdy.go | 98 +
.../client-go/transport/token_source.go | 158 +
.../k8s.io/client-go/transport/transport.go | 303 +
vendor/k8s.io/client-go/util/cert/OWNERS | 9 +
vendor/k8s.io/client-go/util/cert/cert.go | 206 +
vendor/k8s.io/client-go/util/cert/csr.go | 75 +
vendor/k8s.io/client-go/util/cert/io.go | 113 +
vendor/k8s.io/client-go/util/cert/pem.go | 73 +
.../client-go/util/cert/server_inspection.go | 102 +
.../util/connrotation/connrotation.go | 105 +
vendor/k8s.io/client-go/util/exec/exec.go | 52 +
.../client-go/util/flowcontrol/backoff.go | 149 +
.../client-go/util/flowcontrol/throttle.go | 159 +
.../k8s.io/client-go/util/homedir/homedir.go | 92 +
vendor/k8s.io/client-go/util/keyutil/OWNERS | 7 +
vendor/k8s.io/client-go/util/keyutil/key.go | 323 +
vendor/k8s.io/client-go/util/retry/OWNERS | 4 +
vendor/k8s.io/client-go/util/retry/util.go | 105 +
.../util/workqueue/default_rate_limiters.go | 211 +
.../util/workqueue/delaying_queue.go | 280 +
vendor/k8s.io/client-go/util/workqueue/doc.go | 26 +
.../client-go/util/workqueue/metrics.go | 261 +
.../client-go/util/workqueue/parallelizer.go | 101 +
.../k8s.io/client-go/util/workqueue/queue.go | 212 +
.../util/workqueue/rate_limiting_queue.go | 69 +
vendor/k8s.io/cluster-bootstrap/LICENSE | 202 +
.../k8s.io/cluster-bootstrap/token/api/doc.go | 20 +
.../cluster-bootstrap/token/api/types.go | 112 +
.../cluster-bootstrap/token/util/helpers.go | 136 +
vendor/k8s.io/code-generator/CONTRIBUTING.md | 7 +
vendor/k8s.io/code-generator/LICENSE | 202 +
vendor/k8s.io/code-generator/OWNERS | 13 +
vendor/k8s.io/code-generator/README.md | 24 +
.../k8s.io/code-generator/SECURITY_CONTACTS | 16 +
.../code-generator/cmd/client-gen/OWNERS | 10 +
.../code-generator/cmd/client-gen/README.md | 4 +
.../cmd/client-gen/args/args.go | 126 +
.../cmd/client-gen/args/gvpackages.go | 183 +
.../cmd/client-gen/args/gvtype.go | 110 +
.../client-gen/generators/client_generator.go | 400 +
.../generators/fake/fake_client_generator.go | 130 +
.../fake/generator_fake_for_clientset.go | 167 +
.../fake/generator_fake_for_group.go | 130 +
.../fake/generator_fake_for_type.go | 482 +
.../generators/generator_for_clientset.go | 183 +
.../generators/generator_for_expansion.go | 54 +
.../generators/generator_for_group.go | 246 +
.../generators/generator_for_type.go | 610 +
.../generators/scheme/generator_for_scheme.go | 187 +
.../cmd/client-gen/generators/util/tags.go | 341 +
.../code-generator/cmd/client-gen/main.go | 66 +
.../cmd/client-gen/path/path.go | 31 +
.../cmd/client-gen/types/helpers.go | 121 +
.../cmd/client-gen/types/types.go | 75 +
.../cmd/conversion-gen/args/args.go | 83 +
.../conversion-gen/generators/conversion.go | 1195 +
.../code-generator/cmd/conversion-gen/main.go | 125 +
.../cmd/deepcopy-gen/args/args.go | 54 +
.../code-generator/cmd/deepcopy-gen/main.go | 85 +
.../cmd/defaulter-gen/args/args.go | 54 +
.../code-generator/cmd/defaulter-gen/main.go | 84 +
.../cmd/go-to-protobuf/.gitignore | 1 +
.../code-generator/cmd/go-to-protobuf/OWNERS | 6 +
.../code-generator/cmd/go-to-protobuf/main.go | 39 +
.../cmd/go-to-protobuf/protobuf/cmd.go | 460 +
.../cmd/go-to-protobuf/protobuf/generator.go | 773 +
.../go-to-protobuf/protobuf/import_tracker.go | 50 +
.../cmd/go-to-protobuf/protobuf/namer.go | 208 +
.../cmd/go-to-protobuf/protobuf/package.go | 215 +
.../cmd/go-to-protobuf/protobuf/parser.go | 464 +
.../cmd/go-to-protobuf/protobuf/tags.go | 33 +
.../go-to-protobuf/protoc-gen-gogo/main.go | 32 +
.../code-generator/cmd/import-boss/.gitignore | 1 +
.../code-generator/cmd/import-boss/README.md | 97 +
.../code-generator/cmd/import-boss/main.go | 49 +
.../cmd/informer-gen/args/args.go | 83 +
.../cmd/informer-gen/generators/factory.go | 258 +
.../generators/factoryinterface.go | 90 +
.../cmd/informer-gen/generators/generic.go | 184 +
.../informer-gen/generators/groupinterface.go | 118 +
.../cmd/informer-gen/generators/informer.go | 186 +
.../cmd/informer-gen/generators/packages.go | 354 +
.../cmd/informer-gen/generators/types.go | 42 +
.../generators/versioninterface.go | 109 +
.../code-generator/cmd/informer-gen/main.go | 63 +
.../cmd/lister-gen/args/args.go | 64 +
.../cmd/lister-gen/generators/expansion.go | 67 +
.../cmd/lister-gen/generators/lister.go | 376 +
.../code-generator/cmd/lister-gen/main.go | 60 +
.../code-generator/cmd/openapi-gen/main.go | 57 +
.../cmd/register-gen/args/args.go | 39 +
.../cmd/register-gen/generators/packages.go | 137 +
.../generators/register_external.go | 117 +
.../code-generator/cmd/register-gen/main.go | 53 +
.../code-generator/cmd/set-gen/.gitignore | 1 +
.../k8s.io/code-generator/cmd/set-gen/main.go | 56 +
.../k8s.io/code-generator/code-of-conduct.md | 3 +
.../k8s.io/code-generator/generate-groups.sh | 95 +
.../generate-internal-groups.sh | 122 +
vendor/k8s.io/code-generator/go.mod | 23 +
vendor/k8s.io/code-generator/go.sum | 141 +
.../code-generator/pkg/namer/tag-override.go | 58 +
.../k8s.io/code-generator/pkg/util/build.go | 61 +
.../pkg/util/plural_exceptions.go | 37 +
.../third_party/forked/golang/reflect/type.go | 91 +
vendor/k8s.io/code-generator/tools.go | 35 +
vendor/k8s.io/component-base/LICENSE | 202 +
vendor/k8s.io/component-base/config/OWNERS | 14 +
vendor/k8s.io/component-base/config/doc.go | 19 +
vendor/k8s.io/component-base/config/types.go | 88 +
.../config/v1alpha1/conversion.go | 61 +
.../config/v1alpha1/defaults.go | 112 +
.../component-base/config/v1alpha1/doc.go | 20 +
.../config/v1alpha1/register.go | 31 +
.../component-base/config/v1alpha1/types.go | 90 +
.../v1alpha1/zz_generated.conversion.go | 152 +
.../config/v1alpha1/zz_generated.deepcopy.go | 103 +
.../config/zz_generated.deepcopy.go | 88 +
vendor/k8s.io/gengo/LICENSE | 202 +
vendor/k8s.io/gengo/args/args.go | 212 +
.../deepcopy-gen/generators/deepcopy.go | 924 +
.../defaulter-gen/generators/defaulter.go | 832 +
.../import-boss/generators/import_restrict.go | 419 +
.../gengo/examples/set-gen/generators/sets.go | 362 +
.../gengo/examples/set-gen/generators/tags.go | 33 +
.../gengo/examples/set-gen/sets/byte.go | 205 +
.../k8s.io/gengo/examples/set-gen/sets/doc.go | 20 +
.../gengo/examples/set-gen/sets/empty.go | 23 +
.../k8s.io/gengo/examples/set-gen/sets/int.go | 205 +
.../gengo/examples/set-gen/sets/int64.go | 205 +
.../gengo/examples/set-gen/sets/string.go | 205 +
.../gengo/generator/default_generator.go | 62 +
.../k8s.io/gengo/generator/default_package.go | 75 +
vendor/k8s.io/gengo/generator/doc.go | 31 +
.../k8s.io/gengo/generator/error_tracker.go | 50 +
vendor/k8s.io/gengo/generator/execute.go | 314 +
vendor/k8s.io/gengo/generator/generator.go | 256 +
.../k8s.io/gengo/generator/import_tracker.go | 70 +
.../k8s.io/gengo/generator/snippet_writer.go | 154 +
.../gengo/generator/transitive_closure.go | 65 +
vendor/k8s.io/gengo/namer/doc.go | 31 +
vendor/k8s.io/gengo/namer/import_tracker.go | 112 +
vendor/k8s.io/gengo/namer/namer.go | 383 +
vendor/k8s.io/gengo/namer/order.go | 72 +
vendor/k8s.io/gengo/namer/plural_namer.go | 120 +
vendor/k8s.io/gengo/parser/doc.go | 19 +
vendor/k8s.io/gengo/parser/parse.go | 861 +
vendor/k8s.io/gengo/types/comments.go | 82 +
vendor/k8s.io/gengo/types/doc.go | 19 +
vendor/k8s.io/gengo/types/flatten.go | 57 +
vendor/k8s.io/gengo/types/types.go | 526 +
vendor/k8s.io/helm/LICENSE | 202 +
.../k8s.io/helm/pkg/chartutil/capabilities.go | 71 +
vendor/k8s.io/helm/pkg/chartutil/chartfile.go | 98 +
vendor/k8s.io/helm/pkg/chartutil/create.go | 448 +
vendor/k8s.io/helm/pkg/chartutil/doc.go | 44 +
vendor/k8s.io/helm/pkg/chartutil/expand.go | 86 +
vendor/k8s.io/helm/pkg/chartutil/files.go | 236 +
vendor/k8s.io/helm/pkg/chartutil/load.go | 328 +
.../k8s.io/helm/pkg/chartutil/requirements.go | 471 +
vendor/k8s.io/helm/pkg/chartutil/save.go | 227 +
vendor/k8s.io/helm/pkg/chartutil/transform.go | 25 +
vendor/k8s.io/helm/pkg/chartutil/values.go | 451 +
vendor/k8s.io/helm/pkg/engine/doc.go | 23 +
vendor/k8s.io/helm/pkg/engine/engine.go | 374 +
vendor/k8s.io/helm/pkg/ignore/doc.go | 67 +
vendor/k8s.io/helm/pkg/ignore/rules.go | 221 +
vendor/k8s.io/helm/pkg/manifest/doc.go | 23 +
vendor/k8s.io/helm/pkg/manifest/splitter.go | 46 +
vendor/k8s.io/helm/pkg/manifest/types.go | 28 +
.../helm/pkg/proto/hapi/chart/chart.pb.go | 119 +
.../helm/pkg/proto/hapi/chart/config.pb.go | 78 +
.../helm/pkg/proto/hapi/chart/metadata.pb.go | 276 +
.../helm/pkg/proto/hapi/chart/template.pb.go | 60 +
.../helm/pkg/proto/hapi/release/hook.pb.go | 235 +
.../helm/pkg/proto/hapi/release/info.pb.go | 90 +
.../helm/pkg/proto/hapi/release/release.pb.go | 124 +
.../helm/pkg/proto/hapi/release/status.pb.go | 141 +
.../pkg/proto/hapi/release/test_run.pb.go | 118 +
.../pkg/proto/hapi/release/test_suite.pb.go | 73 +
.../helm/pkg/proto/hapi/version/version.pb.go | 81 +
vendor/k8s.io/helm/pkg/releaseutil/filter.go | 78 +
.../k8s.io/helm/pkg/releaseutil/manifest.go | 59 +
vendor/k8s.io/helm/pkg/releaseutil/sorter.go | 100 +
vendor/k8s.io/helm/pkg/sympath/walk.go | 115 +
vendor/k8s.io/helm/pkg/timeconv/doc.go | 23 +
vendor/k8s.io/helm/pkg/timeconv/timeconv.go | 58 +
vendor/k8s.io/helm/pkg/version/compatible.go | 65 +
vendor/k8s.io/helm/pkg/version/doc.go | 18 +
vendor/k8s.io/helm/pkg/version/version.go | 54 +
vendor/k8s.io/klog/.travis.yml | 16 +
vendor/k8s.io/klog/CONTRIBUTING.md | 22 +
vendor/k8s.io/klog/LICENSE | 191 +
vendor/k8s.io/klog/OWNERS | 19 +
vendor/k8s.io/klog/README.md | 97 +
vendor/k8s.io/klog/RELEASE.md | 9 +
vendor/k8s.io/klog/SECURITY_CONTACTS | 20 +
vendor/k8s.io/klog/code-of-conduct.md | 3 +
vendor/k8s.io/klog/go.mod | 5 +
vendor/k8s.io/klog/go.sum | 2 +
vendor/k8s.io/klog/klog.go | 1308 +
vendor/k8s.io/klog/klog_file.go | 139 +
vendor/k8s.io/klog/v2/.gitignore | 17 +
vendor/k8s.io/klog/v2/CONTRIBUTING.md | 22 +
vendor/k8s.io/klog/v2/LICENSE | 191 +
vendor/k8s.io/klog/v2/OWNERS | 19 +
vendor/k8s.io/klog/v2/README.md | 99 +
vendor/k8s.io/klog/v2/RELEASE.md | 9 +
vendor/k8s.io/klog/v2/SECURITY_CONTACTS | 20 +
vendor/k8s.io/klog/v2/code-of-conduct.md | 3 +
vendor/k8s.io/klog/v2/go.mod | 5 +
vendor/k8s.io/klog/v2/go.sum | 2 +
vendor/k8s.io/klog/v2/klog.go | 1525 +
vendor/k8s.io/klog/v2/klog_file.go | 164 +
vendor/k8s.io/kube-aggregator/LICENSE | 202 +
.../pkg/apis/apiregistration/doc.go | 21 +
.../pkg/apis/apiregistration/helpers.go | 128 +
.../pkg/apis/apiregistration/register.go | 54 +
.../pkg/apis/apiregistration/types.go | 146 +
.../pkg/apis/apiregistration/v1/defaults.go | 33 +
.../pkg/apis/apiregistration/v1/doc.go | 37 +
.../apis/apiregistration/v1/generated.pb.go | 1832 +
.../apis/apiregistration/v1/generated.proto | 144 +
.../pkg/apis/apiregistration/v1/register.go | 61 +
.../pkg/apis/apiregistration/v1/types.go | 155 +
.../v1/zz_generated.conversion.go | 298 +
.../v1/zz_generated.deepcopy.go | 173 +
.../v1/zz_generated.defaults.go | 47 +
.../apis/apiregistration/v1beta1/defaults.go | 33 +
.../pkg/apis/apiregistration/v1beta1/doc.go | 38 +
.../apiregistration/v1beta1/generated.pb.go | 1832 +
.../apiregistration/v1beta1/generated.proto | 144 +
.../apis/apiregistration/v1beta1/register.go | 61 +
.../pkg/apis/apiregistration/v1beta1/types.go | 161 +
.../v1beta1/zz_generated.conversion.go | 298 +
.../v1beta1/zz_generated.deepcopy.go | 173 +
.../v1beta1/zz_generated.defaults.go | 47 +
.../zz_generated.prerelease-lifecycle.go | 73 +
.../apiregistration/zz_generated.deepcopy.go | 220 +
.../clientset/clientset.go | 111 +
.../clientset_generated/clientset/doc.go | 20 +
.../clientset/scheme/doc.go | 20 +
.../clientset/scheme/register.go | 58 +
.../v1/apiregistration_client.go | 89 +
.../typed/apiregistration/v1/apiservice.go | 184 +
.../clientset/typed/apiregistration/v1/doc.go | 20 +
.../apiregistration/v1/generated_expansion.go | 21 +
.../v1beta1/apiregistration_client.go | 89 +
.../apiregistration/v1beta1/apiservice.go | 184 +
.../typed/apiregistration/v1beta1/doc.go | 20 +
.../v1beta1/generated_expansion.go | 21 +
vendor/k8s.io/kube-openapi/LICENSE | 202 +
.../kube-openapi/cmd/openapi-gen/args/args.go | 76 +
.../cmd/openapi-gen/openapi-gen.go | 57 +
.../k8s.io/kube-openapi/pkg/common/common.go | 192 +
vendor/k8s.io/kube-openapi/pkg/common/doc.go | 19 +
.../kube-openapi/pkg/generators/README.md | 49 +
.../kube-openapi/pkg/generators/api_linter.go | 220 +
.../kube-openapi/pkg/generators/config.go | 91 +
.../kube-openapi/pkg/generators/extension.go | 198 +
.../kube-openapi/pkg/generators/openapi.go | 692 +
.../kube-openapi/pkg/generators/rules/OWNERS | 4 +
.../kube-openapi/pkg/generators/rules/doc.go | 23 +
.../pkg/generators/rules/idl_tag.go | 53 +
.../pkg/generators/rules/names_match.go | 172 +
.../generators/rules/omitempty_match_case.go | 64 +
.../kube-openapi/pkg/generators/union.go | 207 +
.../k8s.io/kube-openapi/pkg/util/proto/OWNERS | 2 +
.../k8s.io/kube-openapi/pkg/util/proto/doc.go | 19 +
.../kube-openapi/pkg/util/proto/document.go | 318 +
.../kube-openapi/pkg/util/proto/openapi.go | 278 +
.../kube-openapi/pkg/util/sets/empty.go | 27 +
.../kube-openapi/pkg/util/sets/string.go | 207 +
vendor/k8s.io/utils/LICENSE | 202 +
vendor/k8s.io/utils/buffer/ring_growing.go | 72 +
vendor/k8s.io/utils/integer/integer.go | 73 +
vendor/k8s.io/utils/pointer/OWNERS | 10 +
vendor/k8s.io/utils/pointer/README.md | 3 +
vendor/k8s.io/utils/pointer/pointer.go | 131 +
vendor/k8s.io/utils/trace/README.md | 67 +
vendor/k8s.io/utils/trace/trace.go | 282 +
vendor/modules.txt | 818 +
vendor/sigs.k8s.io/cli-utils/LICENSE | 201 +
vendor/sigs.k8s.io/cli-utils/LICENSE_TEMPLATE | 2 +
.../cli-utils/pkg/kstatus/status/core.go | 585 +
.../cli-utils/pkg/kstatus/status/doc.go | 41 +
.../cli-utils/pkg/kstatus/status/generic.go | 100 +
.../cli-utils/pkg/kstatus/status/status.go | 241 +
.../cli-utils/pkg/kstatus/status/util.go | 143 +
.../sigs.k8s.io/controller-runtime/.gitignore | 24 +
.../controller-runtime/.golangci.yml | 35 +
.../controller-runtime/CONTRIBUTING.md | 19 +
vendor/sigs.k8s.io/controller-runtime/FAQ.md | 81 +
vendor/sigs.k8s.io/controller-runtime/LICENSE | 201 +
.../sigs.k8s.io/controller-runtime/Makefile | 110 +
vendor/sigs.k8s.io/controller-runtime/OWNERS | 10 +
.../controller-runtime/OWNERS_ALIASES | 39 +
.../sigs.k8s.io/controller-runtime/README.md | 66 +
.../controller-runtime/SECURITY_CONTACTS | 15 +
.../controller-runtime/TMP-LOGGING.md | 169 +
.../controller-runtime/VERSIONING.md | 30 +
.../sigs.k8s.io/controller-runtime/alias.go | 150 +
.../controller-runtime/code-of-conduct.md | 3 +
vendor/sigs.k8s.io/controller-runtime/doc.go | 127 +
vendor/sigs.k8s.io/controller-runtime/go.mod | 30 +
vendor/sigs.k8s.io/controller-runtime/go.sum | 644 +
.../pkg/builder/controller.go | 311 +
.../controller-runtime/pkg/builder/doc.go | 28 +
.../controller-runtime/pkg/builder/options.go | 117 +
.../controller-runtime/pkg/builder/webhook.go | 167 +
.../controller-runtime/pkg/cache/cache.go | 141 +
.../controller-runtime/pkg/cache/doc.go | 19 +
.../pkg/cache/informer_cache.go | 218 +
.../pkg/cache/internal/cache_reader.go | 185 +
.../pkg/cache/internal/deleg_map.go | 120 +
.../pkg/cache/internal/informers_map.go | 352 +
.../pkg/cache/multi_namespace_cache.go | 222 +
.../pkg/client/apiutil/apimachinery.go | 151 +
.../pkg/client/apiutil/dynamicrestmapper.go | 285 +
.../controller-runtime/pkg/client/client.go | 264 +
.../pkg/client/client_cache.go | 151 +
.../controller-runtime/pkg/client/codec.go | 24 +
.../pkg/client/config/config.go | 158 +
.../pkg/client/config/doc.go | 18 +
.../controller-runtime/pkg/client/doc.go | 49 +
.../controller-runtime/pkg/client/dryrun.go | 106 +
.../pkg/client/interfaces.go | 136 +
.../pkg/client/metadata_client.go | 193 +
.../pkg/client/namespaced_client.go | 253 +
.../controller-runtime/pkg/client/object.go | 77 +
.../controller-runtime/pkg/client/options.go | 697 +
.../controller-runtime/pkg/client/patch.go | 186 +
.../controller-runtime/pkg/client/split.go | 132 +
.../pkg/client/typed_client.go | 205 +
.../pkg/client/unstructured_client.go | 277 +
.../controller-runtime/pkg/config/config.go | 114 +
.../controller-runtime/pkg/config/doc.go | 25 +
.../pkg/config/v1alpha1/doc.go | 20 +
.../pkg/config/v1alpha1/register.go | 37 +
.../pkg/config/v1alpha1/types.go | 127 +
.../config/v1alpha1/zz_generated.deepcopy.go | 119 +
.../pkg/controller/controller.go | 127 +
.../controllerutil/controllerutil.go | 377 +
.../pkg/controller/controllerutil/doc.go | 20 +
.../controller-runtime/pkg/controller/doc.go | 25 +
.../pkg/conversion/conversion.go | 40 +
.../controller-runtime/pkg/event/doc.go | 28 +
.../controller-runtime/pkg/event/event.go | 55 +
.../controller-runtime/pkg/handler/doc.go | 38 +
.../controller-runtime/pkg/handler/enqueue.go | 91 +
.../pkg/handler/enqueue_mapped.go | 89 +
.../pkg/handler/enqueue_owner.go | 188 +
.../pkg/handler/eventhandler.go | 104 +
.../controller-runtime/pkg/healthz/doc.go | 32 +
.../controller-runtime/pkg/healthz/healthz.go | 207 +
.../pkg/internal/controller/controller.go | 305 +
.../internal/controller/metrics/metrics.go | 77 +
.../pkg/internal/log/log.go | 32 +
.../pkg/internal/recorder/recorder.go | 156 +
.../pkg/leaderelection/doc.go | 24 +
.../pkg/leaderelection/leader_election.go | 120 +
.../controller-runtime/pkg/log/deleg.go | 137 +
.../controller-runtime/pkg/log/log.go | 99 +
.../controller-runtime/pkg/log/null.go | 60 +
.../controller-runtime/pkg/log/zap/flags.go | 131 +
.../pkg/log/zap/kube_helpers.go | 129 +
.../controller-runtime/pkg/log/zap/zap.go | 275 +
.../pkg/manager/client_builder.go | 61 +
.../controller-runtime/pkg/manager/doc.go | 21 +
.../pkg/manager/internal.go | 695 +
.../controller-runtime/pkg/manager/manager.go | 606 +
.../pkg/manager/signals/doc.go | 20 +
.../pkg/manager/signals/signal.go | 45 +
.../pkg/manager/signals/signal_posix.go | 26 +
.../pkg/manager/signals/signal_windows.go | 23 +
.../pkg/metrics/client_go_adapter.go | 213 +
.../controller-runtime/pkg/metrics/doc.go | 20 +
.../pkg/metrics/listener.go | 52 +
.../pkg/metrics/registry.go | 30 +
.../pkg/metrics/workqueue.go | 130 +
.../controller-runtime/pkg/predicate/doc.go | 20 +
.../pkg/predicate/predicate.go | 303 +
.../controller-runtime/pkg/ratelimiter/doc.go | 22 +
.../pkg/ratelimiter/ratelimiter.go | 30 +
.../controller-runtime/pkg/reconcile/doc.go | 21 +
.../pkg/reconcile/reconcile.go | 102 +
.../pkg/recorder/recorder.go | 31 +
.../pkg/runtime/inject/doc.go | 22 +
.../pkg/runtime/inject/inject.go | 162 +
.../controller-runtime/pkg/scheme/scheme.go | 94 +
.../controller-runtime/pkg/source/doc.go | 22 +
.../pkg/source/internal/eventsource.go | 138 +
.../controller-runtime/pkg/source/source.go | 322 +
.../pkg/webhook/admission/decode.go | 76 +
.../pkg/webhook/admission/defaulter.go | 75 +
.../pkg/webhook/admission/doc.go | 28 +
.../pkg/webhook/admission/http.go | 144 +
.../pkg/webhook/admission/inject.go | 31 +
.../pkg/webhook/admission/multi.go | 127 +
.../pkg/webhook/admission/response.go | 121 +
.../pkg/webhook/admission/validator.go | 122 +
.../pkg/webhook/admission/webhook.go | 200 +
.../controller-runtime/pkg/webhook/alias.go | 73 +
.../pkg/webhook/conversion/conversion.go | 345 +
.../pkg/webhook/conversion/decoder.go | 31 +
.../controller-runtime/pkg/webhook/doc.go | 28 +
.../internal/certwatcher/certwatcher.go | 163 +
.../pkg/webhook/internal/metrics/metrics.go | 61 +
.../controller-runtime/pkg/webhook/server.go | 268 +
vendor/sigs.k8s.io/controller-tools/LICENSE | 201 +
.../cmd/controller-gen/main.go | 263 +
.../controller-tools/pkg/crd/conv.go | 122 +
.../controller-tools/pkg/crd/desc_visitor.go | 78 +
.../controller-tools/pkg/crd/doc.go | 63 +
.../controller-tools/pkg/crd/flatten.go | 441 +
.../controller-tools/pkg/crd/gen.go | 351 +
.../controller-tools/pkg/crd/known_types.go | 128 +
.../controller-tools/pkg/crd/markers/crd.go | 318 +
.../controller-tools/pkg/crd/markers/doc.go | 46 +
.../pkg/crd/markers/package.go | 40 +
.../pkg/crd/markers/register.go | 83 +
.../pkg/crd/markers/topology.go | 155 +
.../pkg/crd/markers/validation.go | 391 +
.../crd/markers/zz_generated.markerhelp.go | 430 +
.../controller-tools/pkg/crd/parser.go | 237 +
.../controller-tools/pkg/crd/schema.go | 427 +
.../pkg/crd/schema_visitor.go | 131 +
.../controller-tools/pkg/crd/spec.go | 174 +
.../pkg/crd/zz_generated.markerhelp.go | 57 +
.../controller-tools/pkg/deepcopy/doc.go | 23 +
.../controller-tools/pkg/deepcopy/gen.go | 304 +
.../controller-tools/pkg/deepcopy/traverse.go | 829 +
.../pkg/deepcopy/zz_generated.markerhelp.go | 45 +
.../controller-tools/pkg/genall/doc.go | 58 +
.../controller-tools/pkg/genall/genall.go | 215 +
.../controller-tools/pkg/genall/help/doc.go | 23 +
.../pkg/genall/help/pretty/doc.go | 30 +
.../pkg/genall/help/pretty/help.go | 171 +
.../pkg/genall/help/pretty/print.go | 304 +
.../pkg/genall/help/pretty/table.go | 64 +
.../controller-tools/pkg/genall/help/sort.go | 106 +
.../controller-tools/pkg/genall/help/types.go | 215 +
.../controller-tools/pkg/genall/input.go | 37 +
.../controller-tools/pkg/genall/options.go | 192 +
.../controller-tools/pkg/genall/output.go | 160 +
.../pkg/genall/zz_generated.markerhelp.go | 89 +
.../controller-tools/pkg/loader/doc.go | 60 +
.../controller-tools/pkg/loader/errors.go | 67 +
.../controller-tools/pkg/loader/loader.go | 360 +
.../controller-tools/pkg/loader/paths.go | 32 +
.../controller-tools/pkg/loader/refs.go | 268 +
.../controller-tools/pkg/loader/visit.go | 81 +
.../controller-tools/pkg/markers/collect.go | 422 +
.../controller-tools/pkg/markers/doc.go | 113 +
.../controller-tools/pkg/markers/help.go | 81 +
.../controller-tools/pkg/markers/parse.go | 923 +
.../controller-tools/pkg/markers/reg.go | 153 +
.../controller-tools/pkg/markers/regutil.go | 36 +
.../controller-tools/pkg/markers/zip.go | 191 +
.../controller-tools/pkg/rbac/parser.go | 267 +
.../pkg/rbac/zz_generated.markerhelp.go | 77 +
.../controller-tools/pkg/schemapatcher/gen.go | 513 +
.../schemapatcher/internal/yaml/convert.go | 61 +
.../pkg/schemapatcher/internal/yaml/nested.go | 87 +
.../pkg/schemapatcher/internal/yaml/set.go | 80 +
.../schemapatcher/zz_generated.markerhelp.go | 45 +
.../controller-tools/pkg/version/version.go | 49 +
.../controller-tools/pkg/webhook/conv.go | 44 +
.../controller-tools/pkg/webhook/parser.go | 426 +
.../pkg/webhook/zz_generated.markerhelp.go | 96 +
.../structured-merge-diff/v4/LICENSE | 201 +
.../v4/value/allocator.go | 203 +
.../structured-merge-diff/v4/value/doc.go | 21 +
.../structured-merge-diff/v4/value/fields.go | 97 +
.../v4/value/jsontagutil.go | 91 +
.../structured-merge-diff/v4/value/list.go | 139 +
.../v4/value/listreflect.go | 98 +
.../v4/value/listunstructured.go | 74 +
.../structured-merge-diff/v4/value/map.go | 270 +
.../v4/value/mapreflect.go | 209 +
.../v4/value/mapunstructured.go | 190 +
.../v4/value/reflectcache.go | 463 +
.../structured-merge-diff/v4/value/scalar.go | 50 +
.../v4/value/structreflect.go | 208 +
.../structured-merge-diff/v4/value/value.go | 347 +
.../v4/value/valuereflect.go | 294 +
.../v4/value/valueunstructured.go | 178 +
vendor/sigs.k8s.io/yaml/.gitignore | 20 +
vendor/sigs.k8s.io/yaml/.travis.yml | 13 +
vendor/sigs.k8s.io/yaml/CONTRIBUTING.md | 31 +
vendor/sigs.k8s.io/yaml/LICENSE | 50 +
vendor/sigs.k8s.io/yaml/OWNERS | 27 +
vendor/sigs.k8s.io/yaml/README.md | 123 +
vendor/sigs.k8s.io/yaml/RELEASE.md | 9 +
vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS | 17 +
vendor/sigs.k8s.io/yaml/code-of-conduct.md | 3 +
vendor/sigs.k8s.io/yaml/fields.go | 502 +
vendor/sigs.k8s.io/yaml/go.mod | 8 +
vendor/sigs.k8s.io/yaml/go.sum | 9 +
vendor/sigs.k8s.io/yaml/yaml.go | 380 +
vendor/sigs.k8s.io/yaml/yaml_go110.go | 14 +
4164 files changed, 1264675 insertions(+)
create mode 100755 .ci/component_descriptor
create mode 100644 .ci/pipeline_definitions
create mode 100755 .ci/prepare_release
create mode 100755 .ci/set_dependency_version
create mode 100755 .ci/verify
create mode 100644 .circleci/config.yaml
create mode 100644 .dockerignore
create mode 100644 .github/ISSUE_TEMPLATE/bug.md
create mode 100644 .github/ISSUE_TEMPLATE/feature.md
create mode 100644 .github/ISSUE_TEMPLATE/flaking-test.md
create mode 100644 .github/ISSUE_TEMPLATE/support.md
create mode 100644 .github/pull_request_template.md
create mode 100644 .gitignore
create mode 100644 .golangci.yaml
create mode 100644 Dockerfile
create mode 100644 LICENSE.md
create mode 100644 Makefile
create mode 100644 NOTICE.md
create mode 100644 README.md
create mode 100644 VERSION
create mode 100644 charts/gardener-extension-shoot-cert-service/.helmignore
create mode 100644 charts/gardener-extension-shoot-cert-service/Chart.yaml
create mode 100644 charts/gardener-extension-shoot-cert-service/doc.go
create mode 100644 charts/gardener-extension-shoot-cert-service/templates/_helpers.tpl
create mode 100644 charts/gardener-extension-shoot-cert-service/templates/configmap-imagevector-overwrite.yaml
create mode 100644 charts/gardener-extension-shoot-cert-service/templates/deployment.yaml
create mode 100644 charts/gardener-extension-shoot-cert-service/templates/fleet-config.yaml
create mode 100644 charts/gardener-extension-shoot-cert-service/templates/priorityclass.yaml
create mode 100644 charts/gardener-extension-shoot-cert-service/templates/rbac-shoot-cert-service.yaml
create mode 100644 charts/gardener-extension-shoot-cert-service/templates/rbac.yaml
create mode 100644 charts/gardener-extension-shoot-cert-service/templates/serviceaccount.yaml
create mode 100644 charts/gardener-extension-shoot-cert-service/templates/vpa.yaml
create mode 100644 charts/gardener-extension-shoot-cert-service/values.yaml
create mode 100644 cmd/gardener-extension-shoot-fleet-agent/app/app.go
create mode 100644 cmd/gardener-extension-shoot-fleet-agent/app/options.go
create mode 100644 cmd/gardener-extension-shoot-fleet-agent/main.go
create mode 100644 docs/installation/setup.md
create mode 100644 docs/usage/register_cluster.md
create mode 100644 example/00-config.yaml
create mode 100644 example/10-fake-shoot-controlplane.yaml
create mode 100644 example/20-crd-cluster.yaml
create mode 100644 example/20-crd-extension.yaml
create mode 100644 example/20-crd-issuer.yaml
create mode 100644 example/20-crd-managedresource.yaml
create mode 100644 example/25-rbac.yaml
create mode 100644 example/30-cluster.yaml
create mode 100644 example/30-extension.yaml
create mode 100644 example/controller-registration.yaml
create mode 100644 go.mod
create mode 100644 go.sum
create mode 100644 hack/api-reference/config.json
create mode 100644 hack/api-reference/config.md
create mode 100755 hack/component_descriptor
create mode 100644 hack/tools.go
create mode 100755 hack/update-codegen.sh
create mode 100755 hack/update-github-templates.sh
create mode 100644 pkg/apis/config/doc.go
create mode 100644 pkg/apis/config/install/install.go
create mode 100644 pkg/apis/config/loader/loader.go
create mode 100644 pkg/apis/config/register.go
create mode 100644 pkg/apis/config/types.go
create mode 100644 pkg/apis/config/v1alpha1/defaults.go
create mode 100644 pkg/apis/config/v1alpha1/doc.go
create mode 100644 pkg/apis/config/v1alpha1/register.go
create mode 100644 pkg/apis/config/v1alpha1/types.go
create mode 100644 pkg/apis/config/v1alpha1/zz_generated.conversion.go
create mode 100644 pkg/apis/config/v1alpha1/zz_generated.deepcopy.go
create mode 100644 pkg/apis/config/v1alpha1/zz_generated.defaults.go
create mode 100644 pkg/apis/config/zz_generated.deepcopy.go
create mode 100644 pkg/client/fleet/clientset/versioned/clientset.go
create mode 100644 pkg/client/fleet/clientset/versioned/doc.go
create mode 100644 pkg/client/fleet/clientset/versioned/fake/clientset_generated.go
create mode 100644 pkg/client/fleet/clientset/versioned/fake/doc.go
create mode 100644 pkg/client/fleet/clientset/versioned/fake/register.go
create mode 100644 pkg/client/fleet/clientset/versioned/scheme/doc.go
create mode 100644 pkg/client/fleet/clientset/versioned/scheme/register.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/bundle.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/bundledeployment.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/bundlenamespacemapping.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/cluster.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/clustergroup.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/clusterregistration.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/clusterregistrationtoken.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/content.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/doc.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/doc.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_bundle.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_bundledeployment.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_bundlenamespacemapping.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_cluster.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_clustergroup.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_clusterregistration.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_clusterregistrationtoken.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_content.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_fleet.cattle.io_client.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_gitrepo.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_gitreporestriction.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fleet.cattle.io_client.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/generated_expansion.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/gitrepo.go
create mode 100644 pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/gitreporestriction.go
create mode 100644 pkg/cmd/options.go
create mode 100644 pkg/controller/actuator.go
create mode 100644 pkg/controller/add.go
create mode 100644 pkg/controller/config/config.go
create mode 100644 pkg/controller/fleetmanager.go
create mode 100644 pkg/controller/healthcheck/add.go
create mode 100644 pkg/controller/utils.go
create mode 100644 pkg/imagevector/imagevector.go
create mode 100644 vendor/github.com/BurntSushi/toml/.gitignore
create mode 100644 vendor/github.com/BurntSushi/toml/.travis.yml
create mode 100644 vendor/github.com/BurntSushi/toml/COMPATIBLE
create mode 100644 vendor/github.com/BurntSushi/toml/COPYING
create mode 100644 vendor/github.com/BurntSushi/toml/Makefile
create mode 100644 vendor/github.com/BurntSushi/toml/README.md
create mode 100644 vendor/github.com/BurntSushi/toml/decode.go
create mode 100644 vendor/github.com/BurntSushi/toml/decode_meta.go
create mode 100644 vendor/github.com/BurntSushi/toml/doc.go
create mode 100644 vendor/github.com/BurntSushi/toml/encode.go
create mode 100644 vendor/github.com/BurntSushi/toml/encoding_types.go
create mode 100644 vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
create mode 100644 vendor/github.com/BurntSushi/toml/lex.go
create mode 100644 vendor/github.com/BurntSushi/toml/parse.go
create mode 100644 vendor/github.com/BurntSushi/toml/session.vim
create mode 100644 vendor/github.com/BurntSushi/toml/type_check.go
create mode 100644 vendor/github.com/BurntSushi/toml/type_fields.go
create mode 100644 vendor/github.com/Masterminds/goutils/.travis.yml
create mode 100644 vendor/github.com/Masterminds/goutils/CHANGELOG.md
create mode 100644 vendor/github.com/Masterminds/goutils/LICENSE.txt
create mode 100644 vendor/github.com/Masterminds/goutils/README.md
create mode 100644 vendor/github.com/Masterminds/goutils/appveyor.yml
create mode 100644 vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
create mode 100644 vendor/github.com/Masterminds/goutils/randomstringutils.go
create mode 100644 vendor/github.com/Masterminds/goutils/stringutils.go
create mode 100644 vendor/github.com/Masterminds/goutils/wordutils.go
create mode 100644 vendor/github.com/Masterminds/semver/.travis.yml
create mode 100644 vendor/github.com/Masterminds/semver/CHANGELOG.md
create mode 100644 vendor/github.com/Masterminds/semver/LICENSE.txt
create mode 100644 vendor/github.com/Masterminds/semver/Makefile
create mode 100644 vendor/github.com/Masterminds/semver/README.md
create mode 100644 vendor/github.com/Masterminds/semver/appveyor.yml
create mode 100644 vendor/github.com/Masterminds/semver/collection.go
create mode 100644 vendor/github.com/Masterminds/semver/constraints.go
create mode 100644 vendor/github.com/Masterminds/semver/doc.go
create mode 100644 vendor/github.com/Masterminds/semver/version.go
create mode 100644 vendor/github.com/Masterminds/semver/version_fuzz.go
create mode 100644 vendor/github.com/Masterminds/sprig/.gitignore
create mode 100644 vendor/github.com/Masterminds/sprig/.travis.yml
create mode 100644 vendor/github.com/Masterminds/sprig/CHANGELOG.md
create mode 100644 vendor/github.com/Masterminds/sprig/LICENSE.txt
create mode 100644 vendor/github.com/Masterminds/sprig/Makefile
create mode 100644 vendor/github.com/Masterminds/sprig/README.md
create mode 100644 vendor/github.com/Masterminds/sprig/appveyor.yml
create mode 100644 vendor/github.com/Masterminds/sprig/crypto.go
create mode 100644 vendor/github.com/Masterminds/sprig/date.go
create mode 100644 vendor/github.com/Masterminds/sprig/defaults.go
create mode 100644 vendor/github.com/Masterminds/sprig/dict.go
create mode 100644 vendor/github.com/Masterminds/sprig/doc.go
create mode 100644 vendor/github.com/Masterminds/sprig/functions.go
create mode 100644 vendor/github.com/Masterminds/sprig/glide.yaml
create mode 100644 vendor/github.com/Masterminds/sprig/list.go
create mode 100644 vendor/github.com/Masterminds/sprig/network.go
create mode 100644 vendor/github.com/Masterminds/sprig/numeric.go
create mode 100644 vendor/github.com/Masterminds/sprig/reflect.go
create mode 100644 vendor/github.com/Masterminds/sprig/regex.go
create mode 100644 vendor/github.com/Masterminds/sprig/semver.go
create mode 100644 vendor/github.com/Masterminds/sprig/strings.go
create mode 100644 vendor/github.com/Masterminds/sprig/url.go
create mode 100644 vendor/github.com/PuerkitoBio/purell/.gitignore
create mode 100644 vendor/github.com/PuerkitoBio/purell/.travis.yml
create mode 100644 vendor/github.com/PuerkitoBio/purell/LICENSE
create mode 100644 vendor/github.com/PuerkitoBio/purell/README.md
create mode 100644 vendor/github.com/PuerkitoBio/purell/purell.go
create mode 100644 vendor/github.com/PuerkitoBio/urlesc/.travis.yml
create mode 100644 vendor/github.com/PuerkitoBio/urlesc/LICENSE
create mode 100644 vendor/github.com/PuerkitoBio/urlesc/README.md
create mode 100644 vendor/github.com/PuerkitoBio/urlesc/urlesc.go
create mode 100644 vendor/github.com/ahmetb/gen-crd-api-reference-docs/.gitignore
create mode 100644 vendor/github.com/ahmetb/gen-crd-api-reference-docs/.goreleaser.yml
create mode 100644 vendor/github.com/ahmetb/gen-crd-api-reference-docs/.travis.yml
create mode 100644 vendor/github.com/ahmetb/gen-crd-api-reference-docs/LICENSE
create mode 100644 vendor/github.com/ahmetb/gen-crd-api-reference-docs/README.md
create mode 100644 vendor/github.com/ahmetb/gen-crd-api-reference-docs/example-config.json
create mode 100644 vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.mod
create mode 100644 vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.sum
create mode 100644 vendor/github.com/ahmetb/gen-crd-api-reference-docs/main.go
create mode 100644 vendor/github.com/beorn7/perks/LICENSE
create mode 100644 vendor/github.com/beorn7/perks/quantile/exampledata.txt
create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go
create mode 100644 vendor/github.com/cespare/xxhash/v2/.travis.yml
create mode 100644 vendor/github.com/cespare/xxhash/v2/LICENSE.txt
create mode 100644 vendor/github.com/cespare/xxhash/v2/README.md
create mode 100644 vendor/github.com/cespare/xxhash/v2/go.mod
create mode 100644 vendor/github.com/cespare/xxhash/v2/go.sum
create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash.go
create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_other.go
create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
create mode 100644 vendor/github.com/cyphar/filepath-securejoin/.travis.yml
create mode 100644 vendor/github.com/cyphar/filepath-securejoin/LICENSE
create mode 100644 vendor/github.com/cyphar/filepath-securejoin/README.md
create mode 100644 vendor/github.com/cyphar/filepath-securejoin/VERSION
create mode 100644 vendor/github.com/cyphar/filepath-securejoin/join.go
create mode 100644 vendor/github.com/cyphar/filepath-securejoin/vendor.conf
create mode 100644 vendor/github.com/cyphar/filepath-securejoin/vfs.go
create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE
create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go
create mode 100644 vendor/github.com/docker/spdystream/CONTRIBUTING.md
create mode 100644 vendor/github.com/docker/spdystream/LICENSE
create mode 100644 vendor/github.com/docker/spdystream/LICENSE.docs
create mode 100644 vendor/github.com/docker/spdystream/MAINTAINERS
create mode 100644 vendor/github.com/docker/spdystream/README.md
create mode 100644 vendor/github.com/docker/spdystream/connection.go
create mode 100644 vendor/github.com/docker/spdystream/handlers.go
create mode 100644 vendor/github.com/docker/spdystream/priority.go
create mode 100644 vendor/github.com/docker/spdystream/spdy/dictionary.go
create mode 100644 vendor/github.com/docker/spdystream/spdy/read.go
create mode 100644 vendor/github.com/docker/spdystream/spdy/types.go
create mode 100644 vendor/github.com/docker/spdystream/spdy/write.go
create mode 100644 vendor/github.com/docker/spdystream/stream.go
create mode 100644 vendor/github.com/docker/spdystream/utils.go
create mode 100644 vendor/github.com/emicklei/go-restful/.gitignore
create mode 100644 vendor/github.com/emicklei/go-restful/.travis.yml
create mode 100644 vendor/github.com/emicklei/go-restful/CHANGES.md
create mode 100644 vendor/github.com/emicklei/go-restful/LICENSE
create mode 100644 vendor/github.com/emicklei/go-restful/Makefile
create mode 100644 vendor/github.com/emicklei/go-restful/README.md
create mode 100644 vendor/github.com/emicklei/go-restful/Srcfile
create mode 100644 vendor/github.com/emicklei/go-restful/bench_test.sh
create mode 100644 vendor/github.com/emicklei/go-restful/compress.go
create mode 100644 vendor/github.com/emicklei/go-restful/compressor_cache.go
create mode 100644 vendor/github.com/emicklei/go-restful/compressor_pools.go
create mode 100644 vendor/github.com/emicklei/go-restful/compressors.go
create mode 100644 vendor/github.com/emicklei/go-restful/constants.go
create mode 100644 vendor/github.com/emicklei/go-restful/container.go
create mode 100644 vendor/github.com/emicklei/go-restful/cors_filter.go
create mode 100644 vendor/github.com/emicklei/go-restful/coverage.sh
create mode 100644 vendor/github.com/emicklei/go-restful/curly.go
create mode 100644 vendor/github.com/emicklei/go-restful/curly_route.go
create mode 100644 vendor/github.com/emicklei/go-restful/doc.go
create mode 100644 vendor/github.com/emicklei/go-restful/entity_accessors.go
create mode 100644 vendor/github.com/emicklei/go-restful/filter.go
create mode 100644 vendor/github.com/emicklei/go-restful/json.go
create mode 100644 vendor/github.com/emicklei/go-restful/jsoniter.go
create mode 100644 vendor/github.com/emicklei/go-restful/jsr311.go
create mode 100644 vendor/github.com/emicklei/go-restful/log/log.go
create mode 100644 vendor/github.com/emicklei/go-restful/logger.go
create mode 100644 vendor/github.com/emicklei/go-restful/mime.go
create mode 100644 vendor/github.com/emicklei/go-restful/options_filter.go
create mode 100644 vendor/github.com/emicklei/go-restful/parameter.go
create mode 100644 vendor/github.com/emicklei/go-restful/path_expression.go
create mode 100644 vendor/github.com/emicklei/go-restful/path_processor.go
create mode 100644 vendor/github.com/emicklei/go-restful/request.go
create mode 100644 vendor/github.com/emicklei/go-restful/response.go
create mode 100644 vendor/github.com/emicklei/go-restful/route.go
create mode 100644 vendor/github.com/emicklei/go-restful/route_builder.go
create mode 100644 vendor/github.com/emicklei/go-restful/router.go
create mode 100644 vendor/github.com/emicklei/go-restful/service_error.go
create mode 100644 vendor/github.com/emicklei/go-restful/web_service.go
create mode 100644 vendor/github.com/emicklei/go-restful/web_service_container.go
create mode 100644 vendor/github.com/evanphx/json-patch/.travis.yml
create mode 100644 vendor/github.com/evanphx/json-patch/LICENSE
create mode 100644 vendor/github.com/evanphx/json-patch/README.md
create mode 100644 vendor/github.com/evanphx/json-patch/errors.go
create mode 100644 vendor/github.com/evanphx/json-patch/merge.go
create mode 100644 vendor/github.com/evanphx/json-patch/patch.go
create mode 100644 vendor/github.com/fatih/color/.travis.yml
create mode 100644 vendor/github.com/fatih/color/Gopkg.lock
create mode 100644 vendor/github.com/fatih/color/Gopkg.toml
create mode 100644 vendor/github.com/fatih/color/LICENSE.md
create mode 100644 vendor/github.com/fatih/color/README.md
create mode 100644 vendor/github.com/fatih/color/color.go
create mode 100644 vendor/github.com/fatih/color/doc.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig
create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes
create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitignore
create mode 100644 vendor/github.com/fsnotify/fsnotify/.travis.yml
create mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS
create mode 100644 vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
create mode 100644 vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
create mode 100644 vendor/github.com/fsnotify/fsnotify/LICENSE
create mode 100644 vendor/github.com/fsnotify/fsnotify/README.md
create mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/go.mod
create mode 100644 vendor/github.com/fsnotify/fsnotify/go.sum
create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go
create mode 100644 vendor/github.com/gardener/etcd-druid/LICENSE.md
create mode 100644 vendor/github.com/gardener/etcd-druid/NOTICE.md
create mode 100644 vendor/github.com/gardener/etcd-druid/api/v1alpha1/etcd_types.go
create mode 100644 vendor/github.com/gardener/etcd-druid/api/v1alpha1/groupversion_info.go
create mode 100644 vendor/github.com/gardener/etcd-druid/api/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/gardener/external-dns-management/LICENSE.md
create mode 100644 vendor/github.com/gardener/external-dns-management/NOTICE.md
create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/register.go
create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsannotation.go
create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsentry.go
create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsowner.go
create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsprovider.go
create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/doc.go
create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/register.go
create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/state.go
create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/gardener/gardener-resource-manager/LICENSE.md
create mode 100644 vendor/github.com/gardener/gardener-resource-manager/NOTICE.md
create mode 100644 vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/register.go
create mode 100644 vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/doc.go
create mode 100644 vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/register.go
create mode 100644 vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/types.go
create mode 100644 vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/gardener/gardener-resource-manager/pkg/manager/managedresources.go
create mode 100644 vendor/github.com/gardener/gardener-resource-manager/pkg/manager/managedsecrets.go
create mode 100644 vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/bug.md
create mode 100644 vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/doc.go
create mode 100644 vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/feature.md
create mode 100644 vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/flaking-test.md
create mode 100644 vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/support.md
create mode 100644 vendor/github.com/gardener/gardener/.github/doc.go
create mode 100644 vendor/github.com/gardener/gardener/.github/pull_request_template.md
create mode 100644 vendor/github.com/gardener/gardener/LICENSE.md
create mode 100644 vendor/github.com/gardener/gardener/NOTICE.md
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/cluster.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/cmd.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/options.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/reconciler_options.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/error/requeue_error.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/extension/actuator.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/extension/mapper.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/extension/reconciler.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/actuator.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config/types.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/controller.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/healtcheck_actuator.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/inject.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/reconciler.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/log.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/managedresources.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/reconciler.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/shoot.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/status.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/utils.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/handler/enqueue_mapped.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/handler/mapper.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/inject/inject.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/log/log.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/predicate/mapper.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/predicate/predicate.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/util/clientset.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/util/serialization.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/util/shoot.go
create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/util/shoot_clients.go
create mode 100644 vendor/github.com/gardener/gardener/hack/.ci/component_descriptor
create mode 100644 vendor/github.com/gardener/gardener/hack/.ci/doc.go
create mode 100644 vendor/github.com/gardener/gardener/hack/.ci/prepare_release
create mode 100644 vendor/github.com/gardener/gardener/hack/.ci/set_dependency_version
create mode 100644 vendor/github.com/gardener/gardener/hack/LICENSE_BOILERPLATE.txt
create mode 100644 vendor/github.com/gardener/gardener/hack/api-reference/template/members.tpl
create mode 100644 vendor/github.com/gardener/gardener/hack/api-reference/template/pkg.tpl
create mode 100644 vendor/github.com/gardener/gardener/hack/api-reference/template/tools.go
create mode 100644 vendor/github.com/gardener/gardener/hack/api-reference/template/type.tpl
create mode 100755 vendor/github.com/gardener/gardener/hack/check-charts.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/check-generate.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/check.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/clean.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/format.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/generate-controller-registration.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/generate.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/get-build-ld-flags.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/hook-me.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/install-requirements.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/install.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/setup-envtest.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/test-cover-clean.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/test-cover.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/test-prometheus.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/test.sh
create mode 100644 vendor/github.com/gardener/gardener/hack/tools.go
create mode 100755 vendor/github.com/gardener/gardener/hack/update-codegen.sh
create mode 100755 vendor/github.com/gardener/gardener/hack/update-protobuf.sh
create mode 100644 vendor/github.com/gardener/gardener/pkg/api/core/accessor.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/api/extensions/accessor.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/api/extensions/utils.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/doc.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/field_constants.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/install/install.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/register.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_backupbucket.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_backupentry.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_common.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerinstallation.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_plant.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_quota.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_secretbinding.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_shootstate.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_utils.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/constants/types_constants.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/conversions.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/defaults.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/doc.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.pb.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper/condition_builder.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper/helper.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper/shootstate_list.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/register.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupbucket.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupentry.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_cloudprofile.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_common.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerinstallation.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerregistration.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_plant.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_project.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_quota.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_secretbinding.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_seed.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shoot.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shootstate.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_utils.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.conversion.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.defaults.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/utils.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/conversions.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/doc.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.pb.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition_builder.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/register.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupbucket.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupentry.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerinstallation.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_plant.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_project.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_quota.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_secretbinding.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_utils.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/register.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/doc.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/register.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupbucket.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupentry.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_cluster.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_containerruntime.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_controlplane.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_defaults.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_extension.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_infrastructure.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_network.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_worker.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/chartrenderer/default.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/chartrenderer/factory.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/chartrenderer/renderer.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/chartrenderer/sorter.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/clientset.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/doc.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/doc.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/register.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupbucket.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupentry.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/cloudprofile.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerinstallation.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerregistration.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/core_client.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/doc.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/generated_expansion.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/plant.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/project.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/quota.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/secretbinding.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/seed.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shoot.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shootstate.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupbucket.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupentry.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/cloudprofile.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerinstallation.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerregistration.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/core_client.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/doc.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/generated_expansion.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/plant.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/project.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/quota.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/secretbinding.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/seed.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/shoot.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/backupbucket.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/backupentry.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/cloudprofile.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/controllerinstallation.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/controllerregistration.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/expansion_generated.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/plant.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/project.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/quota.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/secretbinding.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/seed.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/shoot.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/doc.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/register.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/admissionplugins.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/applier.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartapplier.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartoptions.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/client.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/clientset.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/deployments.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/manifestoptions.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/options.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/pods.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/runtime_client.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/scaling.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/types.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/controllerutils/associations.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/controllerutils/finalizers.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/controllerutils/miscellaneous.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/controllerutils/operations.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/controllerutils/pointers.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/controllerutils/seedfilter.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/controllerutils/worker.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/extensions/cluster.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/doc.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/helper/helpers.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/register.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/types.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/logger/logger.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/mock/go/context/doc.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/mock/go/context/funcs.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/mock/go/context/mocks.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/common/extensions.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/common/managedresources.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/common/network_policies.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/common/types.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/common/utils.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/chart/chart.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/checksums.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/context/context.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/context/types.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/encoding.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/errors/errors.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/errors/multierror.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/flow/flow.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/flow/graph.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/flow/progress_reporter.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/flow/progress_reporter_delaying.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/flow/progress_reporter_immediate.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/flow/taskfn.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/flow/taskid.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/imagevector/imagevector.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/imagevector/imagevector_components.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/imagevector/types.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/infodata/infodata.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/infodata/types.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/bootstrap_token.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/controllerinstallation.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/controllerregistration.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/daemonset.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/deployment.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/etcd.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/and.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/health.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/pod_health.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/kubernetes.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/leaderelection.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/namespace.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/node.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/object.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/patch.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/project.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/seed.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/service.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/shoot.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/sorter.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/statefulset.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/update.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/worker.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/labels.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/managedresources/managedresources.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/managedresources/registry.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/object.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/random.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/retry/alias.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/retry/retry.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/retry/types.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/basic_auth.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/basic_auth_infodata.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/certificate_infodata.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/certificates.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/control_plane.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/generate.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/private_key_infodata.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/rsa_private_key.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/secrets.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/static_token.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/static_token_infodata.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/types.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/secrets/vpn_tlsauth.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/template_engine.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/timewindow.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/version/version.go
create mode 100644 vendor/github.com/gardener/gardener/pkg/version/version.go
create mode 100644 vendor/github.com/gardener/hvpa-controller/LICENSE.md
create mode 100644 vendor/github.com/gardener/hvpa-controller/api/v1alpha1/groupversion_info.go
create mode 100644 vendor/github.com/gardener/hvpa-controller/api/v1alpha1/hvpa_types.go
create mode 100644 vendor/github.com/gardener/hvpa-controller/api/v1alpha1/hvpa_webhook.go
create mode 100644 vendor/github.com/gardener/hvpa-controller/api/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/ghodss/yaml/.gitignore
create mode 100644 vendor/github.com/ghodss/yaml/.travis.yml
create mode 100644 vendor/github.com/ghodss/yaml/LICENSE
create mode 100644 vendor/github.com/ghodss/yaml/README.md
create mode 100644 vendor/github.com/ghodss/yaml/fields.go
create mode 100644 vendor/github.com/ghodss/yaml/yaml.go
create mode 100644 vendor/github.com/go-logr/logr/LICENSE
create mode 100644 vendor/github.com/go-logr/logr/README.md
create mode 100644 vendor/github.com/go-logr/logr/discard.go
create mode 100644 vendor/github.com/go-logr/logr/go.mod
create mode 100644 vendor/github.com/go-logr/logr/logr.go
create mode 100644 vendor/github.com/go-logr/zapr/.gitignore
create mode 100644 vendor/github.com/go-logr/zapr/Gopkg.lock
create mode 100644 vendor/github.com/go-logr/zapr/Gopkg.toml
create mode 100644 vendor/github.com/go-logr/zapr/LICENSE
create mode 100644 vendor/github.com/go-logr/zapr/README.md
create mode 100644 vendor/github.com/go-logr/zapr/go.mod
create mode 100644 vendor/github.com/go-logr/zapr/zapr.go
create mode 100644 vendor/github.com/go-openapi/jsonpointer/.editorconfig
create mode 100644 vendor/github.com/go-openapi/jsonpointer/.gitignore
create mode 100644 vendor/github.com/go-openapi/jsonpointer/.travis.yml
create mode 100644 vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md
create mode 100644 vendor/github.com/go-openapi/jsonpointer/LICENSE
create mode 100644 vendor/github.com/go-openapi/jsonpointer/README.md
create mode 100644 vendor/github.com/go-openapi/jsonpointer/go.mod
create mode 100644 vendor/github.com/go-openapi/jsonpointer/go.sum
create mode 100644 vendor/github.com/go-openapi/jsonpointer/pointer.go
create mode 100644 vendor/github.com/go-openapi/jsonreference/.gitignore
create mode 100644 vendor/github.com/go-openapi/jsonreference/.travis.yml
create mode 100644 vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md
create mode 100644 vendor/github.com/go-openapi/jsonreference/LICENSE
create mode 100644 vendor/github.com/go-openapi/jsonreference/README.md
create mode 100644 vendor/github.com/go-openapi/jsonreference/go.mod
create mode 100644 vendor/github.com/go-openapi/jsonreference/go.sum
create mode 100644 vendor/github.com/go-openapi/jsonreference/reference.go
create mode 100644 vendor/github.com/go-openapi/spec/.editorconfig
create mode 100644 vendor/github.com/go-openapi/spec/.gitignore
create mode 100644 vendor/github.com/go-openapi/spec/.golangci.yml
create mode 100644 vendor/github.com/go-openapi/spec/.travis.yml
create mode 100644 vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md
create mode 100644 vendor/github.com/go-openapi/spec/LICENSE
create mode 100644 vendor/github.com/go-openapi/spec/README.md
create mode 100644 vendor/github.com/go-openapi/spec/bindata.go
create mode 100644 vendor/github.com/go-openapi/spec/cache.go
create mode 100644 vendor/github.com/go-openapi/spec/contact_info.go
create mode 100644 vendor/github.com/go-openapi/spec/debug.go
create mode 100644 vendor/github.com/go-openapi/spec/expander.go
create mode 100644 vendor/github.com/go-openapi/spec/external_docs.go
create mode 100644 vendor/github.com/go-openapi/spec/go.mod
create mode 100644 vendor/github.com/go-openapi/spec/go.sum
create mode 100644 vendor/github.com/go-openapi/spec/header.go
create mode 100644 vendor/github.com/go-openapi/spec/info.go
create mode 100644 vendor/github.com/go-openapi/spec/items.go
create mode 100644 vendor/github.com/go-openapi/spec/license.go
create mode 100644 vendor/github.com/go-openapi/spec/normalizer.go
create mode 100644 vendor/github.com/go-openapi/spec/operation.go
create mode 100644 vendor/github.com/go-openapi/spec/parameter.go
create mode 100644 vendor/github.com/go-openapi/spec/path_item.go
create mode 100644 vendor/github.com/go-openapi/spec/paths.go
create mode 100644 vendor/github.com/go-openapi/spec/ref.go
create mode 100644 vendor/github.com/go-openapi/spec/response.go
create mode 100644 vendor/github.com/go-openapi/spec/responses.go
create mode 100644 vendor/github.com/go-openapi/spec/schema.go
create mode 100644 vendor/github.com/go-openapi/spec/schema_loader.go
create mode 100644 vendor/github.com/go-openapi/spec/security_scheme.go
create mode 100644 vendor/github.com/go-openapi/spec/spec.go
create mode 100644 vendor/github.com/go-openapi/spec/swagger.go
create mode 100644 vendor/github.com/go-openapi/spec/tag.go
create mode 100644 vendor/github.com/go-openapi/spec/unused.go
create mode 100644 vendor/github.com/go-openapi/spec/xml_object.go
create mode 100644 vendor/github.com/go-openapi/swag/.editorconfig
create mode 100644 vendor/github.com/go-openapi/swag/.gitignore
create mode 100644 vendor/github.com/go-openapi/swag/.golangci.yml
create mode 100644 vendor/github.com/go-openapi/swag/.travis.yml
create mode 100644 vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md
create mode 100644 vendor/github.com/go-openapi/swag/LICENSE
create mode 100644 vendor/github.com/go-openapi/swag/README.md
create mode 100644 vendor/github.com/go-openapi/swag/convert.go
create mode 100644 vendor/github.com/go-openapi/swag/convert_types.go
create mode 100644 vendor/github.com/go-openapi/swag/doc.go
create mode 100644 vendor/github.com/go-openapi/swag/go.mod
create mode 100644 vendor/github.com/go-openapi/swag/go.sum
create mode 100644 vendor/github.com/go-openapi/swag/json.go
create mode 100644 vendor/github.com/go-openapi/swag/loading.go
create mode 100644 vendor/github.com/go-openapi/swag/name_lexem.go
create mode 100644 vendor/github.com/go-openapi/swag/net.go
create mode 100644 vendor/github.com/go-openapi/swag/path.go
create mode 100644 vendor/github.com/go-openapi/swag/post_go18.go
create mode 100644 vendor/github.com/go-openapi/swag/post_go19.go
create mode 100644 vendor/github.com/go-openapi/swag/pre_go18.go
create mode 100644 vendor/github.com/go-openapi/swag/pre_go19.go
create mode 100644 vendor/github.com/go-openapi/swag/split.go
create mode 100644 vendor/github.com/go-openapi/swag/util.go
create mode 100644 vendor/github.com/go-openapi/swag/yaml.go
create mode 100644 vendor/github.com/gobuffalo/flect/.gitignore
create mode 100644 vendor/github.com/gobuffalo/flect/.gometalinter.json
create mode 100644 vendor/github.com/gobuffalo/flect/LICENSE
create mode 100644 vendor/github.com/gobuffalo/flect/Makefile
create mode 100644 vendor/github.com/gobuffalo/flect/README.md
create mode 100644 vendor/github.com/gobuffalo/flect/SHOULDERS.md
create mode 100644 vendor/github.com/gobuffalo/flect/acronyms.go
create mode 100644 vendor/github.com/gobuffalo/flect/azure-pipelines.yml
create mode 100644 vendor/github.com/gobuffalo/flect/azure-tests.yml
create mode 100644 vendor/github.com/gobuffalo/flect/camelize.go
create mode 100644 vendor/github.com/gobuffalo/flect/capitalize.go
create mode 100644 vendor/github.com/gobuffalo/flect/custom_data.go
create mode 100644 vendor/github.com/gobuffalo/flect/dasherize.go
create mode 100644 vendor/github.com/gobuffalo/flect/flect.go
create mode 100644 vendor/github.com/gobuffalo/flect/go.mod
create mode 100644 vendor/github.com/gobuffalo/flect/go.sum
create mode 100644 vendor/github.com/gobuffalo/flect/humanize.go
create mode 100644 vendor/github.com/gobuffalo/flect/ident.go
create mode 100644 vendor/github.com/gobuffalo/flect/lower_upper.go
create mode 100644 vendor/github.com/gobuffalo/flect/ordinalize.go
create mode 100644 vendor/github.com/gobuffalo/flect/pascalize.go
create mode 100644 vendor/github.com/gobuffalo/flect/plural_rules.go
create mode 100644 vendor/github.com/gobuffalo/flect/pluralize.go
create mode 100644 vendor/github.com/gobuffalo/flect/rule.go
create mode 100644 vendor/github.com/gobuffalo/flect/singular_rules.go
create mode 100644 vendor/github.com/gobuffalo/flect/singularize.go
create mode 100644 vendor/github.com/gobuffalo/flect/titleize.go
create mode 100644 vendor/github.com/gobuffalo/flect/underscore.go
create mode 100644 vendor/github.com/gobuffalo/flect/version.go
create mode 100644 vendor/github.com/gobuffalo/logger/.gitignore
create mode 100644 vendor/github.com/gobuffalo/logger/LICENSE
create mode 100644 vendor/github.com/gobuffalo/logger/Makefile
create mode 100644 vendor/github.com/gobuffalo/logger/README.md
create mode 100644 vendor/github.com/gobuffalo/logger/SHOULDERS.md
create mode 100644 vendor/github.com/gobuffalo/logger/formatter.go
create mode 100644 vendor/github.com/gobuffalo/logger/go.mod
create mode 100644 vendor/github.com/gobuffalo/logger/go.sum
create mode 100644 vendor/github.com/gobuffalo/logger/level.go
create mode 100644 vendor/github.com/gobuffalo/logger/logger.go
create mode 100644 vendor/github.com/gobuffalo/logger/logrus.go
create mode 100644 vendor/github.com/gobuffalo/logger/outable.go
create mode 100644 vendor/github.com/gobuffalo/logger/terminal_check.go
create mode 100644 vendor/github.com/gobuffalo/logger/terminal_check_appengine.go
create mode 100644 vendor/github.com/gobuffalo/logger/version.go
create mode 100644 vendor/github.com/gobuffalo/packd/.gitignore
create mode 100644 vendor/github.com/gobuffalo/packd/LICENSE
create mode 100644 vendor/github.com/gobuffalo/packd/Makefile
create mode 100644 vendor/github.com/gobuffalo/packd/README.md
create mode 100644 vendor/github.com/gobuffalo/packd/SHOULDERS.md
create mode 100644 vendor/github.com/gobuffalo/packd/file.go
create mode 100644 vendor/github.com/gobuffalo/packd/file_info.go
create mode 100644 vendor/github.com/gobuffalo/packd/go.mod
create mode 100644 vendor/github.com/gobuffalo/packd/go.sum
create mode 100644 vendor/github.com/gobuffalo/packd/interfaces.go
create mode 100644 vendor/github.com/gobuffalo/packd/internal/takeon/github.com/markbates/errx/.gitignore
create mode 100644 vendor/github.com/gobuffalo/packd/internal/takeon/github.com/markbates/errx/LICENSE
create mode 100644 vendor/github.com/gobuffalo/packd/internal/takeon/github.com/markbates/errx/Makefile
create mode 100644 vendor/github.com/gobuffalo/packd/internal/takeon/github.com/markbates/errx/SHOULDERS.md
create mode 100644 vendor/github.com/gobuffalo/packd/internal/takeon/github.com/markbates/errx/azure-pipelines.yml
create mode 100644 vendor/github.com/gobuffalo/packd/internal/takeon/github.com/markbates/errx/azure-tests.yml
create mode 100644 vendor/github.com/gobuffalo/packd/internal/takeon/github.com/markbates/errx/errx.go
create mode 100644 vendor/github.com/gobuffalo/packd/internal/takeon/github.com/markbates/errx/version.go
create mode 100644 vendor/github.com/gobuffalo/packd/map.go
create mode 100644 vendor/github.com/gobuffalo/packd/memory_box.go
create mode 100644 vendor/github.com/gobuffalo/packd/skip_walker.go
create mode 100644 vendor/github.com/gobuffalo/packd/version.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/.gometalinter.json
create mode 100644 vendor/github.com/gobuffalo/packr/v2/.goreleaser.yml
create mode 100644 vendor/github.com/gobuffalo/packr/v2/.goreleaser.yml.plush
create mode 100644 vendor/github.com/gobuffalo/packr/v2/LICENSE.txt
create mode 100644 vendor/github.com/gobuffalo/packr/v2/Makefile
create mode 100644 vendor/github.com/gobuffalo/packr/v2/README.md
create mode 100644 vendor/github.com/gobuffalo/packr/v2/SHOULDERS.md
create mode 100644 vendor/github.com/gobuffalo/packr/v2/box.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/box_map.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/deprecated.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/dirs_map.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/file/file.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/file/info.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/file/resolver/disk.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/file/resolver/encoding/hex/hex.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/file/resolver/hex_gzip.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/file/resolver/ident.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/file/resolver/in_memory.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/file/resolver/packable.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/file/resolver/resolver.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/go.mod
create mode 100644 vendor/github.com/gobuffalo/packr/v2/go.sum
create mode 100644 vendor/github.com/gobuffalo/packr/v2/helpers.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/internal/envy.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/pack.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/parser/args.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/parser/box.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/parser/file.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/parser/finder.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/parser/gogen.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/parser/parser.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/parser/prospect.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/parser/roots.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/parser/visitor.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/store/clean.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/store/disk.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/store/disk_tmpl.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/store/env.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/store/fn.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/store/legacy.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/jam/store/store.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/LICENSE
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/cmd/build.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/cmd/clean.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/cmd/fix.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/cmd/fix/fix.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/cmd/fix/imports.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/cmd/fix/runner.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/cmd/gocmd.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/cmd/install.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/cmd/pack.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/cmd/root.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/cmd/version.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/packr2/main.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/plog/plog.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/pointer.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/resolvers_map.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/version.go
create mode 100644 vendor/github.com/gobuffalo/packr/v2/walk.go
create mode 100644 vendor/github.com/gobwas/glob/.gitignore
create mode 100644 vendor/github.com/gobwas/glob/.travis.yml
create mode 100644 vendor/github.com/gobwas/glob/LICENSE
create mode 100644 vendor/github.com/gobwas/glob/bench.sh
create mode 100644 vendor/github.com/gobwas/glob/compiler/compiler.go
create mode 100644 vendor/github.com/gobwas/glob/glob.go
create mode 100644 vendor/github.com/gobwas/glob/match/any.go
create mode 100644 vendor/github.com/gobwas/glob/match/any_of.go
create mode 100644 vendor/github.com/gobwas/glob/match/btree.go
create mode 100644 vendor/github.com/gobwas/glob/match/contains.go
create mode 100644 vendor/github.com/gobwas/glob/match/every_of.go
create mode 100644 vendor/github.com/gobwas/glob/match/list.go
create mode 100644 vendor/github.com/gobwas/glob/match/match.go
create mode 100644 vendor/github.com/gobwas/glob/match/max.go
create mode 100644 vendor/github.com/gobwas/glob/match/min.go
create mode 100644 vendor/github.com/gobwas/glob/match/nothing.go
create mode 100644 vendor/github.com/gobwas/glob/match/prefix.go
create mode 100644 vendor/github.com/gobwas/glob/match/prefix_any.go
create mode 100644 vendor/github.com/gobwas/glob/match/prefix_suffix.go
create mode 100644 vendor/github.com/gobwas/glob/match/range.go
create mode 100644 vendor/github.com/gobwas/glob/match/row.go
create mode 100644 vendor/github.com/gobwas/glob/match/segments.go
create mode 100644 vendor/github.com/gobwas/glob/match/single.go
create mode 100644 vendor/github.com/gobwas/glob/match/suffix.go
create mode 100644 vendor/github.com/gobwas/glob/match/suffix_any.go
create mode 100644 vendor/github.com/gobwas/glob/match/super.go
create mode 100644 vendor/github.com/gobwas/glob/match/text.go
create mode 100644 vendor/github.com/gobwas/glob/readme.md
create mode 100644 vendor/github.com/gobwas/glob/syntax/ast/ast.go
create mode 100644 vendor/github.com/gobwas/glob/syntax/ast/parser.go
create mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
create mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/token.go
create mode 100644 vendor/github.com/gobwas/glob/syntax/syntax.go
create mode 100644 vendor/github.com/gobwas/glob/util/runes/runes.go
create mode 100644 vendor/github.com/gobwas/glob/util/strings/strings.go
create mode 100644 vendor/github.com/gogo/protobuf/AUTHORS
create mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS
create mode 100644 vendor/github.com/gogo/protobuf/LICENSE
create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile
create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go
create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden
create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto
create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/compare/compare.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/compare/comparetest.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/description/description.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/description/descriptiontest.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/equal/equal.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/equal/equaltest.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/face/face.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/face/facetest.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/gostring/gostring.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/gostring/gostringtest.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/marshalto/marshalto.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/populate/populate.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/size/size.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/size/sizetest.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/stringer/stringer.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/stringer/stringertest.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/testgen/testgen.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/union/union.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/union/uniontest.go
create mode 100644 vendor/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile
create mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/custom_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/deprecated.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/discard.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/table_merge.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/text.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/internal/remap/remap.go
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/grpc/grpc.go
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go
create mode 100644 vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
create mode 100644 vendor/github.com/gogo/protobuf/vanity/command/command.go
create mode 100644 vendor/github.com/gogo/protobuf/vanity/enum.go
create mode 100644 vendor/github.com/gogo/protobuf/vanity/field.go
create mode 100644 vendor/github.com/gogo/protobuf/vanity/file.go
create mode 100644 vendor/github.com/gogo/protobuf/vanity/foreach.go
create mode 100644 vendor/github.com/gogo/protobuf/vanity/msg.go
create mode 100644 vendor/github.com/golang/groupcache/LICENSE
create mode 100644 vendor/github.com/golang/groupcache/lru/lru.go
create mode 100644 vendor/github.com/golang/mock/AUTHORS
create mode 100644 vendor/github.com/golang/mock/CONTRIBUTORS
create mode 100644 vendor/github.com/golang/mock/LICENSE
create mode 100644 vendor/github.com/golang/mock/gomock/call.go
create mode 100644 vendor/github.com/golang/mock/gomock/callset.go
create mode 100644 vendor/github.com/golang/mock/gomock/controller.go
create mode 100644 vendor/github.com/golang/mock/gomock/matchers.go
create mode 100644 vendor/github.com/golang/mock/mockgen/mockgen.go
create mode 100644 vendor/github.com/golang/mock/mockgen/model/model.go
create mode 100644 vendor/github.com/golang/mock/mockgen/parse.go
create mode 100644 vendor/github.com/golang/mock/mockgen/reflect.go
create mode 100644 vendor/github.com/golang/mock/mockgen/version.1.11.go
create mode 100644 vendor/github.com/golang/mock/mockgen/version.1.12.go
create mode 100644 vendor/github.com/golang/protobuf/AUTHORS
create mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS
create mode 100644 vendor/github.com/golang/protobuf/LICENSE
create mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go
create mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go
create mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go
create mode 100644 vendor/github.com/golang/protobuf/proto/discard.go
create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go
create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go
create mode 100644 vendor/github.com/golang/protobuf/proto/proto.go
create mode 100644 vendor/github.com/golang/protobuf/proto/registry.go
create mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go
create mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go
create mode 100644 vendor/github.com/golang/protobuf/proto/wire.go
create mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go
create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go
create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go
create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go
create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go
create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
create mode 100644 vendor/github.com/google/go-cmp/LICENSE
create mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/export_unsafe.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/name.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/options.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/path.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report_references.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go
create mode 100644 vendor/github.com/google/gofuzz/.travis.yml
create mode 100644 vendor/github.com/google/gofuzz/CONTRIBUTING.md
create mode 100644 vendor/github.com/google/gofuzz/LICENSE
create mode 100644 vendor/github.com/google/gofuzz/README.md
create mode 100644 vendor/github.com/google/gofuzz/doc.go
create mode 100644 vendor/github.com/google/gofuzz/fuzz.go
create mode 100644 vendor/github.com/google/gofuzz/go.mod
create mode 100644 vendor/github.com/google/uuid/.travis.yml
create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md
create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS
create mode 100644 vendor/github.com/google/uuid/LICENSE
create mode 100644 vendor/github.com/google/uuid/README.md
create mode 100644 vendor/github.com/google/uuid/dce.go
create mode 100644 vendor/github.com/google/uuid/doc.go
create mode 100644 vendor/github.com/google/uuid/go.mod
create mode 100644 vendor/github.com/google/uuid/hash.go
create mode 100644 vendor/github.com/google/uuid/marshal.go
create mode 100644 vendor/github.com/google/uuid/node.go
create mode 100644 vendor/github.com/google/uuid/node_js.go
create mode 100644 vendor/github.com/google/uuid/node_net.go
create mode 100644 vendor/github.com/google/uuid/sql.go
create mode 100644 vendor/github.com/google/uuid/time.go
create mode 100644 vendor/github.com/google/uuid/util.go
create mode 100644 vendor/github.com/google/uuid/uuid.go
create mode 100644 vendor/github.com/google/uuid/version1.go
create mode 100644 vendor/github.com/google/uuid/version4.go
create mode 100644 vendor/github.com/googleapis/gnostic/LICENSE
create mode 100644 vendor/github.com/googleapis/gnostic/compiler/README.md
create mode 100644 vendor/github.com/googleapis/gnostic/compiler/context.go
create mode 100644 vendor/github.com/googleapis/gnostic/compiler/error.go
create mode 100644 vendor/github.com/googleapis/gnostic/compiler/extensions.go
create mode 100644 vendor/github.com/googleapis/gnostic/compiler/helpers.go
create mode 100644 vendor/github.com/googleapis/gnostic/compiler/main.go
create mode 100644 vendor/github.com/googleapis/gnostic/compiler/reader.go
create mode 100644 vendor/github.com/googleapis/gnostic/extensions/README.md
create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.proto
create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extensions.go
create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/README.md
create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/base.go
create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/display.go
create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/models.go
create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/operations.go
create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/reader.go
create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/schema.json
create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/writer.go
create mode 100644 vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go
create mode 100644 vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
create mode 100644 vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto
create mode 100644 vendor/github.com/googleapis/gnostic/openapiv2/README.md
create mode 100644 vendor/github.com/googleapis/gnostic/openapiv2/document.go
create mode 100644 vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json
create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE
create mode 100644 vendor/github.com/hashicorp/errwrap/README.md
create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go
create mode 100644 vendor/github.com/hashicorp/errwrap/go.mod
create mode 100644 vendor/github.com/hashicorp/go-multierror/.travis.yml
create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE
create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile
create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md
create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/go.mod
create mode 100644 vendor/github.com/hashicorp/go-multierror/go.sum
create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go
create mode 100644 vendor/github.com/hashicorp/golang-lru/.gitignore
create mode 100644 vendor/github.com/hashicorp/golang-lru/2q.go
create mode 100644 vendor/github.com/hashicorp/golang-lru/LICENSE
create mode 100644 vendor/github.com/hashicorp/golang-lru/README.md
create mode 100644 vendor/github.com/hashicorp/golang-lru/arc.go
create mode 100644 vendor/github.com/hashicorp/golang-lru/doc.go
create mode 100644 vendor/github.com/hashicorp/golang-lru/go.mod
create mode 100644 vendor/github.com/hashicorp/golang-lru/lru.go
create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
create mode 100644 vendor/github.com/huandu/xstrings/.gitignore
create mode 100644 vendor/github.com/huandu/xstrings/.travis.yml
create mode 100644 vendor/github.com/huandu/xstrings/CONTRIBUTING.md
create mode 100644 vendor/github.com/huandu/xstrings/LICENSE
create mode 100644 vendor/github.com/huandu/xstrings/README.md
create mode 100644 vendor/github.com/huandu/xstrings/common.go
create mode 100644 vendor/github.com/huandu/xstrings/convert.go
create mode 100644 vendor/github.com/huandu/xstrings/count.go
create mode 100644 vendor/github.com/huandu/xstrings/doc.go
create mode 100644 vendor/github.com/huandu/xstrings/format.go
create mode 100644 vendor/github.com/huandu/xstrings/go.mod
create mode 100644 vendor/github.com/huandu/xstrings/manipulate.go
create mode 100644 vendor/github.com/huandu/xstrings/translate.go
create mode 100644 vendor/github.com/imdario/mergo/.deepsource.toml
create mode 100644 vendor/github.com/imdario/mergo/.gitignore
create mode 100644 vendor/github.com/imdario/mergo/.travis.yml
create mode 100644 vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
create mode 100644 vendor/github.com/imdario/mergo/LICENSE
create mode 100644 vendor/github.com/imdario/mergo/README.md
create mode 100644 vendor/github.com/imdario/mergo/doc.go
create mode 100644 vendor/github.com/imdario/mergo/go.mod
create mode 100644 vendor/github.com/imdario/mergo/go.sum
create mode 100644 vendor/github.com/imdario/mergo/map.go
create mode 100644 vendor/github.com/imdario/mergo/merge.go
create mode 100644 vendor/github.com/imdario/mergo/mergo.go
create mode 100644 vendor/github.com/inconshreveable/mousetrap/LICENSE
create mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md
create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_others.go
create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows.go
create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
create mode 100644 vendor/github.com/json-iterator/go/.codecov.yml
create mode 100644 vendor/github.com/json-iterator/go/.gitignore
create mode 100644 vendor/github.com/json-iterator/go/.travis.yml
create mode 100644 vendor/github.com/json-iterator/go/Gopkg.lock
create mode 100644 vendor/github.com/json-iterator/go/Gopkg.toml
create mode 100644 vendor/github.com/json-iterator/go/LICENSE
create mode 100644 vendor/github.com/json-iterator/go/README.md
create mode 100644 vendor/github.com/json-iterator/go/adapter.go
create mode 100644 vendor/github.com/json-iterator/go/any.go
create mode 100644 vendor/github.com/json-iterator/go/any_array.go
create mode 100644 vendor/github.com/json-iterator/go/any_bool.go
create mode 100644 vendor/github.com/json-iterator/go/any_float.go
create mode 100644 vendor/github.com/json-iterator/go/any_int32.go
create mode 100644 vendor/github.com/json-iterator/go/any_int64.go
create mode 100644 vendor/github.com/json-iterator/go/any_invalid.go
create mode 100644 vendor/github.com/json-iterator/go/any_nil.go
create mode 100644 vendor/github.com/json-iterator/go/any_number.go
create mode 100644 vendor/github.com/json-iterator/go/any_object.go
create mode 100644 vendor/github.com/json-iterator/go/any_str.go
create mode 100644 vendor/github.com/json-iterator/go/any_uint32.go
create mode 100644 vendor/github.com/json-iterator/go/any_uint64.go
create mode 100644 vendor/github.com/json-iterator/go/build.sh
create mode 100644 vendor/github.com/json-iterator/go/config.go
create mode 100644 vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
create mode 100644 vendor/github.com/json-iterator/go/go.mod
create mode 100644 vendor/github.com/json-iterator/go/go.sum
create mode 100644 vendor/github.com/json-iterator/go/iter.go
create mode 100644 vendor/github.com/json-iterator/go/iter_array.go
create mode 100644 vendor/github.com/json-iterator/go/iter_float.go
create mode 100644 vendor/github.com/json-iterator/go/iter_int.go
create mode 100644 vendor/github.com/json-iterator/go/iter_object.go
create mode 100644 vendor/github.com/json-iterator/go/iter_skip.go
create mode 100644 vendor/github.com/json-iterator/go/iter_skip_sloppy.go
create mode 100644 vendor/github.com/json-iterator/go/iter_skip_strict.go
create mode 100644 vendor/github.com/json-iterator/go/iter_str.go
create mode 100644 vendor/github.com/json-iterator/go/jsoniter.go
create mode 100644 vendor/github.com/json-iterator/go/pool.go
create mode 100644 vendor/github.com/json-iterator/go/reflect.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_array.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_dynamic.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_extension.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_json_number.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_json_raw_message.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_map.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_marshaler.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_native.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_optional.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_slice.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_decoder.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_encoder.go
create mode 100644 vendor/github.com/json-iterator/go/stream.go
create mode 100644 vendor/github.com/json-iterator/go/stream_float.go
create mode 100644 vendor/github.com/json-iterator/go/stream_int.go
create mode 100644 vendor/github.com/json-iterator/go/stream_str.go
create mode 100644 vendor/github.com/json-iterator/go/test.sh
create mode 100644 vendor/github.com/karrick/godirwalk/.gitignore
create mode 100644 vendor/github.com/karrick/godirwalk/LICENSE
create mode 100644 vendor/github.com/karrick/godirwalk/README.md
create mode 100644 vendor/github.com/karrick/godirwalk/azure-pipelines.yml
create mode 100644 vendor/github.com/karrick/godirwalk/bench.sh
create mode 100644 vendor/github.com/karrick/godirwalk/debug_development.go
create mode 100644 vendor/github.com/karrick/godirwalk/debug_release.go
create mode 100644 vendor/github.com/karrick/godirwalk/dirent.go
create mode 100644 vendor/github.com/karrick/godirwalk/doc.go
create mode 100644 vendor/github.com/karrick/godirwalk/go.mod
create mode 100644 vendor/github.com/karrick/godirwalk/go.sum
create mode 100644 vendor/github.com/karrick/godirwalk/inoWithFileno.go
create mode 100644 vendor/github.com/karrick/godirwalk/inoWithIno.go
create mode 100644 vendor/github.com/karrick/godirwalk/modeType.go
create mode 100644 vendor/github.com/karrick/godirwalk/modeTypeWithType.go
create mode 100644 vendor/github.com/karrick/godirwalk/modeTypeWithoutType.go
create mode 100644 vendor/github.com/karrick/godirwalk/nameWithNamlen.go
create mode 100644 vendor/github.com/karrick/godirwalk/nameWithoutNamlen.go
create mode 100644 vendor/github.com/karrick/godirwalk/readdir.go
create mode 100644 vendor/github.com/karrick/godirwalk/readdir_unix.go
create mode 100644 vendor/github.com/karrick/godirwalk/readdir_windows.go
create mode 100644 vendor/github.com/karrick/godirwalk/reclenFromNamlen.go
create mode 100644 vendor/github.com/karrick/godirwalk/reclenFromReclen.go
create mode 100644 vendor/github.com/karrick/godirwalk/scandir_unix.go
create mode 100644 vendor/github.com/karrick/godirwalk/scandir_windows.go
create mode 100644 vendor/github.com/karrick/godirwalk/scanner.go
create mode 100644 vendor/github.com/karrick/godirwalk/walk.go
create mode 100644 vendor/github.com/mailru/easyjson/LICENSE
create mode 100644 vendor/github.com/mailru/easyjson/buffer/pool.go
create mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
create mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
create mode 100644 vendor/github.com/mailru/easyjson/jlexer/error.go
create mode 100644 vendor/github.com/mailru/easyjson/jlexer/lexer.go
create mode 100644 vendor/github.com/mailru/easyjson/jwriter/writer.go
create mode 100644 vendor/github.com/markbates/errx/.gitignore
create mode 100644 vendor/github.com/markbates/errx/LICENSE
create mode 100644 vendor/github.com/markbates/errx/Makefile
create mode 100644 vendor/github.com/markbates/errx/SHOULDERS.md
create mode 100644 vendor/github.com/markbates/errx/azure-pipelines.yml
create mode 100644 vendor/github.com/markbates/errx/azure-tests.yml
create mode 100644 vendor/github.com/markbates/errx/errx.go
create mode 100644 vendor/github.com/markbates/errx/go.mod
create mode 100644 vendor/github.com/markbates/errx/go.sum
create mode 100644 vendor/github.com/markbates/errx/version.go
create mode 100644 vendor/github.com/markbates/oncer/.gitignore
create mode 100644 vendor/github.com/markbates/oncer/LICENSE
create mode 100644 vendor/github.com/markbates/oncer/Makefile
create mode 100644 vendor/github.com/markbates/oncer/SHOULDERS.md
create mode 100644 vendor/github.com/markbates/oncer/azure-pipelines.yml
create mode 100644 vendor/github.com/markbates/oncer/azure-tests.yml
create mode 100644 vendor/github.com/markbates/oncer/deprecate.go
create mode 100644 vendor/github.com/markbates/oncer/go.mod
create mode 100644 vendor/github.com/markbates/oncer/go.sum
create mode 100644 vendor/github.com/markbates/oncer/log.go
create mode 100644 vendor/github.com/markbates/oncer/log_debug.go
create mode 100644 vendor/github.com/markbates/oncer/oncer.go
create mode 100644 vendor/github.com/markbates/oncer/version.go
create mode 100644 vendor/github.com/markbates/safe/.gitignore
create mode 100644 vendor/github.com/markbates/safe/.gometalinter.json
create mode 100644 vendor/github.com/markbates/safe/.travis.yml
create mode 100644 vendor/github.com/markbates/safe/LICENSE
create mode 100644 vendor/github.com/markbates/safe/Makefile
create mode 100644 vendor/github.com/markbates/safe/go.mod
create mode 100644 vendor/github.com/markbates/safe/go.sum
create mode 100644 vendor/github.com/markbates/safe/safe.go
create mode 100644 vendor/github.com/markbates/safe/shoulders.md
create mode 100644 vendor/github.com/markbates/safe/version.go
create mode 100644 vendor/github.com/mattn/go-colorable/.travis.yml
create mode 100644 vendor/github.com/mattn/go-colorable/LICENSE
create mode 100644 vendor/github.com/mattn/go-colorable/README.md
create mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go
create mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go
create mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go
create mode 100644 vendor/github.com/mattn/go-colorable/go.mod
create mode 100644 vendor/github.com/mattn/go-colorable/go.sum
create mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go
create mode 100644 vendor/github.com/mattn/go-isatty/.travis.yml
create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE
create mode 100644 vendor/github.com/mattn/go-isatty/README.md
create mode 100644 vendor/github.com/mattn/go-isatty/doc.go
create mode 100644 vendor/github.com/mattn/go-isatty/go.mod
create mode 100644 vendor/github.com/mattn/go-isatty/go.sum
create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_plan9.go
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_tcgets.go
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go
create mode 100644 vendor/github.com/mattn/go-isatty/renovate.json
create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
create mode 100644 vendor/github.com/mitchellh/copystructure/.travis.yml
create mode 100644 vendor/github.com/mitchellh/copystructure/LICENSE
create mode 100644 vendor/github.com/mitchellh/copystructure/README.md
create mode 100644 vendor/github.com/mitchellh/copystructure/copier_time.go
create mode 100644 vendor/github.com/mitchellh/copystructure/copystructure.go
create mode 100644 vendor/github.com/mitchellh/copystructure/go.mod
create mode 100644 vendor/github.com/mitchellh/copystructure/go.sum
create mode 100644 vendor/github.com/mitchellh/reflectwalk/.travis.yml
create mode 100644 vendor/github.com/mitchellh/reflectwalk/LICENSE
create mode 100644 vendor/github.com/mitchellh/reflectwalk/README.md
create mode 100644 vendor/github.com/mitchellh/reflectwalk/go.mod
create mode 100644 vendor/github.com/mitchellh/reflectwalk/location.go
create mode 100644 vendor/github.com/mitchellh/reflectwalk/location_string.go
create mode 100644 vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
create mode 100644 vendor/github.com/modern-go/concurrent/.gitignore
create mode 100644 vendor/github.com/modern-go/concurrent/.travis.yml
create mode 100644 vendor/github.com/modern-go/concurrent/LICENSE
create mode 100644 vendor/github.com/modern-go/concurrent/README.md
create mode 100644 vendor/github.com/modern-go/concurrent/executor.go
create mode 100644 vendor/github.com/modern-go/concurrent/go_above_19.go
create mode 100644 vendor/github.com/modern-go/concurrent/go_below_19.go
create mode 100644 vendor/github.com/modern-go/concurrent/log.go
create mode 100644 vendor/github.com/modern-go/concurrent/test.sh
create mode 100644 vendor/github.com/modern-go/concurrent/unbounded_executor.go
create mode 100644 vendor/github.com/modern-go/reflect2/.gitignore
create mode 100644 vendor/github.com/modern-go/reflect2/.travis.yml
create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.lock
create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.toml
create mode 100644 vendor/github.com/modern-go/reflect2/LICENSE
create mode 100644 vendor/github.com/modern-go/reflect2/README.md
create mode 100644 vendor/github.com/modern-go/reflect2/go_above_17.go
create mode 100644 vendor/github.com/modern-go/reflect2/go_above_19.go
create mode 100644 vendor/github.com/modern-go/reflect2/go_below_17.go
create mode 100644 vendor/github.com/modern-go/reflect2/go_below_19.go
create mode 100644 vendor/github.com/modern-go/reflect2/reflect2.go
create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_amd64.s
create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_kind.go
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_386.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm64.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_s390x.s
create mode 100644 vendor/github.com/modern-go/reflect2/safe_field.go
create mode 100644 vendor/github.com/modern-go/reflect2/safe_map.go
create mode 100644 vendor/github.com/modern-go/reflect2/safe_slice.go
create mode 100644 vendor/github.com/modern-go/reflect2/safe_struct.go
create mode 100644 vendor/github.com/modern-go/reflect2/safe_type.go
create mode 100644 vendor/github.com/modern-go/reflect2/test.sh
create mode 100644 vendor/github.com/modern-go/reflect2/type_map.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_array.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_eface.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_field.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_iface.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_link.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_map.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_ptr.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_slice.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_struct.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_type.go
create mode 100644 vendor/github.com/nxadm/tail/.gitignore
create mode 100644 vendor/github.com/nxadm/tail/.travis.yml
create mode 100644 vendor/github.com/nxadm/tail/CHANGES.md
create mode 100644 vendor/github.com/nxadm/tail/Dockerfile
create mode 100644 vendor/github.com/nxadm/tail/LICENSE
create mode 100644 vendor/github.com/nxadm/tail/README.md
create mode 100644 vendor/github.com/nxadm/tail/appveyor.yml
create mode 100644 vendor/github.com/nxadm/tail/go.mod
create mode 100644 vendor/github.com/nxadm/tail/go.sum
create mode 100644 vendor/github.com/nxadm/tail/ratelimiter/Licence
create mode 100644 vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go
create mode 100644 vendor/github.com/nxadm/tail/ratelimiter/memory.go
create mode 100644 vendor/github.com/nxadm/tail/ratelimiter/storage.go
create mode 100644 vendor/github.com/nxadm/tail/tail.go
create mode 100644 vendor/github.com/nxadm/tail/tail_posix.go
create mode 100644 vendor/github.com/nxadm/tail/tail_windows.go
create mode 100644 vendor/github.com/nxadm/tail/util/util.go
create mode 100644 vendor/github.com/nxadm/tail/watch/filechanges.go
create mode 100644 vendor/github.com/nxadm/tail/watch/inotify.go
create mode 100644 vendor/github.com/nxadm/tail/watch/inotify_tracker.go
create mode 100644 vendor/github.com/nxadm/tail/watch/polling.go
create mode 100644 vendor/github.com/nxadm/tail/watch/watch.go
create mode 100644 vendor/github.com/nxadm/tail/winfile/winfile.go
create mode 100644 vendor/github.com/onsi/ginkgo/LICENSE
create mode 100644 vendor/github.com/onsi/ginkgo/config/config.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/build_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/help_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/main.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/notifications.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/suite_runner.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args_old.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go16.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/version_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go
create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/failer/failer.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/server.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/spec.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/specs.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
create mode 100644 vendor/github.com/onsi/ginkgo/internal/writer/writer.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/reporter.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
create mode 100644 vendor/github.com/onsi/ginkgo/types/code_location.go
create mode 100644 vendor/github.com/onsi/ginkgo/types/synchronization.go
create mode 100644 vendor/github.com/onsi/ginkgo/types/types.go
create mode 100644 vendor/github.com/pkg/errors/.gitignore
create mode 100644 vendor/github.com/pkg/errors/.travis.yml
create mode 100644 vendor/github.com/pkg/errors/LICENSE
create mode 100644 vendor/github.com/pkg/errors/Makefile
create mode 100644 vendor/github.com/pkg/errors/README.md
create mode 100644 vendor/github.com/pkg/errors/appveyor.yml
create mode 100644 vendor/github.com/pkg/errors/errors.go
create mode 100644 vendor/github.com/pkg/errors/go113.go
create mode 100644 vendor/github.com/pkg/errors/stack.go
create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE
create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/.gitignore
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/wrap.go
create mode 100644 vendor/github.com/prometheus/client_model/LICENSE
create mode 100644 vendor/github.com/prometheus/client_model/NOTICE
create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go
create mode 100644 vendor/github.com/prometheus/common/LICENSE
create mode 100644 vendor/github.com/prometheus/common/NOTICE
create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go
create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go
create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go
create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go
create mode 100644 vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go
create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go
create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
create mode 100644 vendor/github.com/prometheus/common/model/alert.go
create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go
create mode 100644 vendor/github.com/prometheus/common/model/fnv.go
create mode 100644 vendor/github.com/prometheus/common/model/labels.go
create mode 100644 vendor/github.com/prometheus/common/model/labelset.go
create mode 100644 vendor/github.com/prometheus/common/model/metric.go
create mode 100644 vendor/github.com/prometheus/common/model/model.go
create mode 100644 vendor/github.com/prometheus/common/model/signature.go
create mode 100644 vendor/github.com/prometheus/common/model/silence.go
create mode 100644 vendor/github.com/prometheus/common/model/time.go
create mode 100644 vendor/github.com/prometheus/common/model/value.go
create mode 100644 vendor/github.com/prometheus/procfs/.gitignore
create mode 100644 vendor/github.com/prometheus/procfs/.golangci.yml
create mode 100644 vendor/github.com/prometheus/procfs/CONTRIBUTING.md
create mode 100644 vendor/github.com/prometheus/procfs/LICENSE
create mode 100644 vendor/github.com/prometheus/procfs/MAINTAINERS.md
create mode 100644 vendor/github.com/prometheus/procfs/Makefile
create mode 100644 vendor/github.com/prometheus/procfs/Makefile.common
create mode 100644 vendor/github.com/prometheus/procfs/NOTICE
create mode 100644 vendor/github.com/prometheus/procfs/README.md
create mode 100644 vendor/github.com/prometheus/procfs/arp.go
create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo.go
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_arm.go
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_arm64.go
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_default.go
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mips.go
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mips64.go
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
create mode 100644 vendor/github.com/prometheus/procfs/crypto.go
create mode 100644 vendor/github.com/prometheus/procfs/doc.go
create mode 100644 vendor/github.com/prometheus/procfs/fixtures.ttar
create mode 100644 vendor/github.com/prometheus/procfs/fs.go
create mode 100644 vendor/github.com/prometheus/procfs/fscache.go
create mode 100644 vendor/github.com/prometheus/procfs/go.mod
create mode 100644 vendor/github.com/prometheus/procfs/go.sum
create mode 100644 vendor/github.com/prometheus/procfs/internal/fs/fs.go
create mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go
create mode 100644 vendor/github.com/prometheus/procfs/internal/util/readfile.go
create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
create mode 100644 vendor/github.com/prometheus/procfs/internal/util/valueparser.go
create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go
create mode 100644 vendor/github.com/prometheus/procfs/kernel_random.go
create mode 100644 vendor/github.com/prometheus/procfs/loadavg.go
create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go
create mode 100644 vendor/github.com/prometheus/procfs/meminfo.go
create mode 100644 vendor/github.com/prometheus/procfs/mountinfo.go
create mode 100644 vendor/github.com/prometheus/procfs/mountstats.go
create mode 100644 vendor/github.com/prometheus/procfs/net_conntrackstat.go
create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go
create mode 100644 vendor/github.com/prometheus/procfs/net_sockstat.go
create mode 100644 vendor/github.com/prometheus/procfs/net_softnet.go
create mode 100644 vendor/github.com/prometheus/procfs/net_udp.go
create mode 100644 vendor/github.com/prometheus/procfs/net_unix.go
create mode 100644 vendor/github.com/prometheus/procfs/proc.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_cgroup.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_environ.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_fdinfo.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_maps.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_psi.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_smaps.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_status.go
create mode 100644 vendor/github.com/prometheus/procfs/schedstat.go
create mode 100644 vendor/github.com/prometheus/procfs/stat.go
create mode 100644 vendor/github.com/prometheus/procfs/swaps.go
create mode 100644 vendor/github.com/prometheus/procfs/ttar
create mode 100644 vendor/github.com/prometheus/procfs/vm.go
create mode 100644 vendor/github.com/prometheus/procfs/xfrm.go
create mode 100644 vendor/github.com/prometheus/procfs/zoneinfo.go
create mode 100644 vendor/github.com/rancher/fleet/pkg/apis/LICENSE
create mode 100644 vendor/github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1/bundle.go
create mode 100644 vendor/github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1/doc.go
create mode 100644 vendor/github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1/git.go
create mode 100644 vendor/github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1/target.go
create mode 100644 vendor/github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1/values.go
create mode 100644 vendor/github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1/zz_generated_deepcopy.go
create mode 100644 vendor/github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1/zz_generated_list_types.go
create mode 100644 vendor/github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1/zz_generated_register.go
create mode 100644 vendor/github.com/rancher/fleet/pkg/apis/fleet.cattle.io/zz_generated_register.go
create mode 100644 vendor/github.com/rancher/wrangler/LICENSE
create mode 100644 vendor/github.com/rancher/wrangler/pkg/data/convert/convert.go
create mode 100644 vendor/github.com/rancher/wrangler/pkg/data/data.go
create mode 100644 vendor/github.com/rancher/wrangler/pkg/data/merge.go
create mode 100644 vendor/github.com/rancher/wrangler/pkg/data/values.go
create mode 100644 vendor/github.com/rancher/wrangler/pkg/genericcondition/condition.go
create mode 100644 vendor/github.com/rancher/wrangler/pkg/kv/split.go
create mode 100644 vendor/github.com/rancher/wrangler/pkg/summary/cattletypes.go
create mode 100644 vendor/github.com/rancher/wrangler/pkg/summary/condition.go
create mode 100644 vendor/github.com/rancher/wrangler/pkg/summary/coretypes.go
create mode 100644 vendor/github.com/rancher/wrangler/pkg/summary/summarized.go
create mode 100644 vendor/github.com/rancher/wrangler/pkg/summary/summarizers.go
create mode 100644 vendor/github.com/rancher/wrangler/pkg/summary/summary.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/LICENSE
create mode 100644 vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/modfile/print.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/modfile/read.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/modfile/rule.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/module/module.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/semver/semver.go
create mode 100644 vendor/github.com/russross/blackfriday/v2/.gitignore
create mode 100644 vendor/github.com/russross/blackfriday/v2/.travis.yml
create mode 100644 vendor/github.com/russross/blackfriday/v2/LICENSE.txt
create mode 100644 vendor/github.com/russross/blackfriday/v2/README.md
create mode 100644 vendor/github.com/russross/blackfriday/v2/block.go
create mode 100644 vendor/github.com/russross/blackfriday/v2/doc.go
create mode 100644 vendor/github.com/russross/blackfriday/v2/esc.go
create mode 100644 vendor/github.com/russross/blackfriday/v2/go.mod
create mode 100644 vendor/github.com/russross/blackfriday/v2/html.go
create mode 100644 vendor/github.com/russross/blackfriday/v2/inline.go
create mode 100644 vendor/github.com/russross/blackfriday/v2/markdown.go
create mode 100644 vendor/github.com/russross/blackfriday/v2/node.go
create mode 100644 vendor/github.com/russross/blackfriday/v2/smartypants.go
create mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml
create mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE
create mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/README.md
create mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/go.mod
create mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/main.go
create mode 100644 vendor/github.com/sirupsen/logrus/.gitignore
create mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml
create mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml
create mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md
create mode 100644 vendor/github.com/sirupsen/logrus/LICENSE
create mode 100644 vendor/github.com/sirupsen/logrus/README.md
create mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go
create mode 100644 vendor/github.com/sirupsen/logrus/appveyor.yml
create mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go
create mode 100644 vendor/github.com/sirupsen/logrus/doc.go
create mode 100644 vendor/github.com/sirupsen/logrus/entry.go
create mode 100644 vendor/github.com/sirupsen/logrus/exported.go
create mode 100644 vendor/github.com/sirupsen/logrus/formatter.go
create mode 100644 vendor/github.com/sirupsen/logrus/go.mod
create mode 100644 vendor/github.com/sirupsen/logrus/go.sum
create mode 100644 vendor/github.com/sirupsen/logrus/hooks.go
create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go
create mode 100644 vendor/github.com/sirupsen/logrus/logger.go
create mode 100644 vendor/github.com/sirupsen/logrus/logrus.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_windows.go
create mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go
create mode 100644 vendor/github.com/sirupsen/logrus/writer.go
create mode 100644 vendor/github.com/spf13/cobra/.gitignore
create mode 100644 vendor/github.com/spf13/cobra/.mailmap
create mode 100644 vendor/github.com/spf13/cobra/.travis.yml
create mode 100644 vendor/github.com/spf13/cobra/CHANGELOG.md
create mode 100644 vendor/github.com/spf13/cobra/CONTRIBUTING.md
create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt
create mode 100644 vendor/github.com/spf13/cobra/Makefile
create mode 100644 vendor/github.com/spf13/cobra/README.md
create mode 100644 vendor/github.com/spf13/cobra/args.go
create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go
create mode 100644 vendor/github.com/spf13/cobra/bash_completions.md
create mode 100644 vendor/github.com/spf13/cobra/cobra.go
create mode 100644 vendor/github.com/spf13/cobra/command.go
create mode 100644 vendor/github.com/spf13/cobra/command_notwin.go
create mode 100644 vendor/github.com/spf13/cobra/command_win.go
create mode 100644 vendor/github.com/spf13/cobra/custom_completions.go
create mode 100644 vendor/github.com/spf13/cobra/fish_completions.go
create mode 100644 vendor/github.com/spf13/cobra/fish_completions.md
create mode 100644 vendor/github.com/spf13/cobra/go.mod
create mode 100644 vendor/github.com/spf13/cobra/go.sum
create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.go
create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.md
create mode 100644 vendor/github.com/spf13/cobra/projects_using_cobra.md
create mode 100644 vendor/github.com/spf13/cobra/shell_completions.go
create mode 100644 vendor/github.com/spf13/cobra/shell_completions.md
create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.go
create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.md
create mode 100644 vendor/github.com/spf13/pflag/.gitignore
create mode 100644 vendor/github.com/spf13/pflag/.travis.yml
create mode 100644 vendor/github.com/spf13/pflag/LICENSE
create mode 100644 vendor/github.com/spf13/pflag/README.md
create mode 100644 vendor/github.com/spf13/pflag/bool.go
create mode 100644 vendor/github.com/spf13/pflag/bool_slice.go
create mode 100644 vendor/github.com/spf13/pflag/bytes.go
create mode 100644 vendor/github.com/spf13/pflag/count.go
create mode 100644 vendor/github.com/spf13/pflag/duration.go
create mode 100644 vendor/github.com/spf13/pflag/duration_slice.go
create mode 100644 vendor/github.com/spf13/pflag/flag.go
create mode 100644 vendor/github.com/spf13/pflag/float32.go
create mode 100644 vendor/github.com/spf13/pflag/float32_slice.go
create mode 100644 vendor/github.com/spf13/pflag/float64.go
create mode 100644 vendor/github.com/spf13/pflag/float64_slice.go
create mode 100644 vendor/github.com/spf13/pflag/go.mod
create mode 100644 vendor/github.com/spf13/pflag/go.sum
create mode 100644 vendor/github.com/spf13/pflag/golangflag.go
create mode 100644 vendor/github.com/spf13/pflag/int.go
create mode 100644 vendor/github.com/spf13/pflag/int16.go
create mode 100644 vendor/github.com/spf13/pflag/int32.go
create mode 100644 vendor/github.com/spf13/pflag/int32_slice.go
create mode 100644 vendor/github.com/spf13/pflag/int64.go
create mode 100644 vendor/github.com/spf13/pflag/int64_slice.go
create mode 100644 vendor/github.com/spf13/pflag/int8.go
create mode 100644 vendor/github.com/spf13/pflag/int_slice.go
create mode 100644 vendor/github.com/spf13/pflag/ip.go
create mode 100644 vendor/github.com/spf13/pflag/ip_slice.go
create mode 100644 vendor/github.com/spf13/pflag/ipmask.go
create mode 100644 vendor/github.com/spf13/pflag/ipnet.go
create mode 100644 vendor/github.com/spf13/pflag/string.go
create mode 100644 vendor/github.com/spf13/pflag/string_array.go
create mode 100644 vendor/github.com/spf13/pflag/string_slice.go
create mode 100644 vendor/github.com/spf13/pflag/string_to_int.go
create mode 100644 vendor/github.com/spf13/pflag/string_to_int64.go
create mode 100644 vendor/github.com/spf13/pflag/string_to_string.go
create mode 100644 vendor/github.com/spf13/pflag/uint.go
create mode 100644 vendor/github.com/spf13/pflag/uint16.go
create mode 100644 vendor/github.com/spf13/pflag/uint32.go
create mode 100644 vendor/github.com/spf13/pflag/uint64.go
create mode 100644 vendor/github.com/spf13/pflag/uint8.go
create mode 100644 vendor/github.com/spf13/pflag/uint_slice.go
create mode 100644 vendor/go.uber.org/atomic/.codecov.yml
create mode 100644 vendor/go.uber.org/atomic/.gitignore
create mode 100644 vendor/go.uber.org/atomic/.travis.yml
create mode 100644 vendor/go.uber.org/atomic/CHANGELOG.md
create mode 100644 vendor/go.uber.org/atomic/LICENSE.txt
create mode 100644 vendor/go.uber.org/atomic/Makefile
create mode 100644 vendor/go.uber.org/atomic/README.md
create mode 100644 vendor/go.uber.org/atomic/atomic.go
create mode 100644 vendor/go.uber.org/atomic/error.go
create mode 100644 vendor/go.uber.org/atomic/go.mod
create mode 100644 vendor/go.uber.org/atomic/go.sum
create mode 100644 vendor/go.uber.org/atomic/string.go
create mode 100644 vendor/go.uber.org/multierr/.codecov.yml
create mode 100644 vendor/go.uber.org/multierr/.gitignore
create mode 100644 vendor/go.uber.org/multierr/.travis.yml
create mode 100644 vendor/go.uber.org/multierr/CHANGELOG.md
create mode 100644 vendor/go.uber.org/multierr/LICENSE.txt
create mode 100644 vendor/go.uber.org/multierr/Makefile
create mode 100644 vendor/go.uber.org/multierr/README.md
create mode 100644 vendor/go.uber.org/multierr/error.go
create mode 100644 vendor/go.uber.org/multierr/glide.yaml
create mode 100644 vendor/go.uber.org/multierr/go.mod
create mode 100644 vendor/go.uber.org/multierr/go.sum
create mode 100644 vendor/go.uber.org/multierr/go113.go
create mode 100644 vendor/go.uber.org/zap/.codecov.yml
create mode 100644 vendor/go.uber.org/zap/.gitignore
create mode 100644 vendor/go.uber.org/zap/.readme.tmpl
create mode 100644 vendor/go.uber.org/zap/.travis.yml
create mode 100644 vendor/go.uber.org/zap/CHANGELOG.md
create mode 100644 vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
create mode 100644 vendor/go.uber.org/zap/CONTRIBUTING.md
create mode 100644 vendor/go.uber.org/zap/FAQ.md
create mode 100644 vendor/go.uber.org/zap/LICENSE.txt
create mode 100644 vendor/go.uber.org/zap/Makefile
create mode 100644 vendor/go.uber.org/zap/README.md
create mode 100644 vendor/go.uber.org/zap/array.go
create mode 100644 vendor/go.uber.org/zap/buffer/buffer.go
create mode 100644 vendor/go.uber.org/zap/buffer/pool.go
create mode 100644 vendor/go.uber.org/zap/checklicense.sh
create mode 100644 vendor/go.uber.org/zap/config.go
create mode 100644 vendor/go.uber.org/zap/doc.go
create mode 100644 vendor/go.uber.org/zap/encoder.go
create mode 100644 vendor/go.uber.org/zap/error.go
create mode 100644 vendor/go.uber.org/zap/field.go
create mode 100644 vendor/go.uber.org/zap/flag.go
create mode 100644 vendor/go.uber.org/zap/glide.yaml
create mode 100644 vendor/go.uber.org/zap/global.go
create mode 100644 vendor/go.uber.org/zap/global_go112.go
create mode 100644 vendor/go.uber.org/zap/global_prego112.go
create mode 100644 vendor/go.uber.org/zap/go.mod
create mode 100644 vendor/go.uber.org/zap/go.sum
create mode 100644 vendor/go.uber.org/zap/http_handler.go
create mode 100644 vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
create mode 100644 vendor/go.uber.org/zap/internal/color/color.go
create mode 100644 vendor/go.uber.org/zap/internal/exit/exit.go
create mode 100644 vendor/go.uber.org/zap/level.go
create mode 100644 vendor/go.uber.org/zap/logger.go
create mode 100644 vendor/go.uber.org/zap/options.go
create mode 100644 vendor/go.uber.org/zap/sink.go
create mode 100644 vendor/go.uber.org/zap/stacktrace.go
create mode 100644 vendor/go.uber.org/zap/sugar.go
create mode 100644 vendor/go.uber.org/zap/time.go
create mode 100644 vendor/go.uber.org/zap/writer.go
create mode 100644 vendor/go.uber.org/zap/zapcore/console_encoder.go
create mode 100644 vendor/go.uber.org/zap/zapcore/core.go
create mode 100644 vendor/go.uber.org/zap/zapcore/doc.go
create mode 100644 vendor/go.uber.org/zap/zapcore/encoder.go
create mode 100644 vendor/go.uber.org/zap/zapcore/entry.go
create mode 100644 vendor/go.uber.org/zap/zapcore/error.go
create mode 100644 vendor/go.uber.org/zap/zapcore/field.go
create mode 100644 vendor/go.uber.org/zap/zapcore/hook.go
create mode 100644 vendor/go.uber.org/zap/zapcore/increase_level.go
create mode 100644 vendor/go.uber.org/zap/zapcore/json_encoder.go
create mode 100644 vendor/go.uber.org/zap/zapcore/level.go
create mode 100644 vendor/go.uber.org/zap/zapcore/level_strings.go
create mode 100644 vendor/go.uber.org/zap/zapcore/marshaler.go
create mode 100644 vendor/go.uber.org/zap/zapcore/memory_encoder.go
create mode 100644 vendor/go.uber.org/zap/zapcore/sampler.go
create mode 100644 vendor/go.uber.org/zap/zapcore/tee.go
create mode 100644 vendor/go.uber.org/zap/zapcore/write_syncer.go
create mode 100644 vendor/golang.org/x/crypto/AUTHORS
create mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/crypto/LICENSE
create mode 100644 vendor/golang.org/x/crypto/PATENTS
create mode 100644 vendor/golang.org/x/crypto/blowfish/block.go
create mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go
create mode 100644 vendor/golang.org/x/crypto/blowfish/const.go
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_arm64.go
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_generic.go
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
create mode 100644 vendor/golang.org/x/crypto/chacha20/xor.go
create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go
create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go
create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s
create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_generic.go
create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go
create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go
create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519_go113.go
create mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
create mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
create mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing.go
create mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go
create mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
create mode 100644 vendor/golang.org/x/crypto/poly1305/bits_compat.go
create mode 100644 vendor/golang.org/x/crypto/poly1305/bits_go1.13.go
create mode 100644 vendor/golang.org/x/crypto/poly1305/mac_noasm.go
create mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305.go
create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.go
create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.s
create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_generic.go
create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go
create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s
create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.go
create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.s
create mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go
create mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go
create mode 100644 vendor/golang.org/x/crypto/ssh/certs.go
create mode 100644 vendor/golang.org/x/crypto/ssh/channel.go
create mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go
create mode 100644 vendor/golang.org/x/crypto/ssh/client.go
create mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go
create mode 100644 vendor/golang.org/x/crypto/ssh/common.go
create mode 100644 vendor/golang.org/x/crypto/ssh/connection.go
create mode 100644 vendor/golang.org/x/crypto/ssh/doc.go
create mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go
create mode 100644 vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go
create mode 100644 vendor/golang.org/x/crypto/ssh/kex.go
create mode 100644 vendor/golang.org/x/crypto/ssh/keys.go
create mode 100644 vendor/golang.org/x/crypto/ssh/mac.go
create mode 100644 vendor/golang.org/x/crypto/ssh/messages.go
create mode 100644 vendor/golang.org/x/crypto/ssh/mux.go
create mode 100644 vendor/golang.org/x/crypto/ssh/server.go
create mode 100644 vendor/golang.org/x/crypto/ssh/session.go
create mode 100644 vendor/golang.org/x/crypto/ssh/ssh_gss.go
create mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go
create mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go
create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go
create mode 100644 vendor/golang.org/x/crypto/ssh/transport.go
create mode 100644 vendor/golang.org/x/lint/.travis.yml
create mode 100644 vendor/golang.org/x/lint/CONTRIBUTING.md
create mode 100644 vendor/golang.org/x/lint/LICENSE
create mode 100644 vendor/golang.org/x/lint/README.md
create mode 100644 vendor/golang.org/x/lint/go.mod
create mode 100644 vendor/golang.org/x/lint/go.sum
create mode 100644 vendor/golang.org/x/lint/golint/golint.go
create mode 100644 vendor/golang.org/x/lint/golint/import.go
create mode 100644 vendor/golang.org/x/lint/golint/importcomment.go
create mode 100644 vendor/golang.org/x/lint/lint.go
create mode 100644 vendor/golang.org/x/mod/LICENSE
create mode 100644 vendor/golang.org/x/mod/PATENTS
create mode 100644 vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go
create mode 100644 vendor/golang.org/x/mod/modfile/print.go
create mode 100644 vendor/golang.org/x/mod/modfile/read.go
create mode 100644 vendor/golang.org/x/mod/modfile/rule.go
create mode 100644 vendor/golang.org/x/mod/module/module.go
create mode 100644 vendor/golang.org/x/mod/semver/semver.go
create mode 100644 vendor/golang.org/x/net/AUTHORS
create mode 100644 vendor/golang.org/x/net/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/net/LICENSE
create mode 100644 vendor/golang.org/x/net/PATENTS
create mode 100644 vendor/golang.org/x/net/context/context.go
create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
create mode 100644 vendor/golang.org/x/net/context/go17.go
create mode 100644 vendor/golang.org/x/net/context/go19.go
create mode 100644 vendor/golang.org/x/net/context/pre_go17.go
create mode 100644 vendor/golang.org/x/net/context/pre_go19.go
create mode 100644 vendor/golang.org/x/net/http/httpguts/guts.go
create mode 100644 vendor/golang.org/x/net/http/httpguts/httplex.go
create mode 100644 vendor/golang.org/x/net/http2/.gitignore
create mode 100644 vendor/golang.org/x/net/http2/Dockerfile
create mode 100644 vendor/golang.org/x/net/http2/Makefile
create mode 100644 vendor/golang.org/x/net/http2/README
create mode 100644 vendor/golang.org/x/net/http2/ciphers.go
create mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go
create mode 100644 vendor/golang.org/x/net/http2/databuffer.go
create mode 100644 vendor/golang.org/x/net/http2/errors.go
create mode 100644 vendor/golang.org/x/net/http2/flow.go
create mode 100644 vendor/golang.org/x/net/http2/frame.go
create mode 100644 vendor/golang.org/x/net/http2/go111.go
create mode 100644 vendor/golang.org/x/net/http2/gotrack.go
create mode 100644 vendor/golang.org/x/net/http2/headermap.go
create mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go
create mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go
create mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go
create mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go
create mode 100644 vendor/golang.org/x/net/http2/http2.go
create mode 100644 vendor/golang.org/x/net/http2/not_go111.go
create mode 100644 vendor/golang.org/x/net/http2/pipe.go
create mode 100644 vendor/golang.org/x/net/http2/server.go
create mode 100644 vendor/golang.org/x/net/http2/transport.go
create mode 100644 vendor/golang.org/x/net/http2/write.go
create mode 100644 vendor/golang.org/x/net/http2/writesched.go
create mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go
create mode 100644 vendor/golang.org/x/net/http2/writesched_random.go
create mode 100644 vendor/golang.org/x/net/idna/idna10.0.0.go
create mode 100644 vendor/golang.org/x/net/idna/idna9.0.0.go
create mode 100644 vendor/golang.org/x/net/idna/punycode.go
create mode 100644 vendor/golang.org/x/net/idna/tables10.0.0.go
create mode 100644 vendor/golang.org/x/net/idna/tables11.0.0.go
create mode 100644 vendor/golang.org/x/net/idna/tables12.0.0.go
create mode 100644 vendor/golang.org/x/net/idna/tables13.0.0.go
create mode 100644 vendor/golang.org/x/net/idna/tables9.0.0.go
create mode 100644 vendor/golang.org/x/net/idna/trie.go
create mode 100644 vendor/golang.org/x/net/idna/trieval.go
create mode 100644 vendor/golang.org/x/oauth2/.travis.yml
create mode 100644 vendor/golang.org/x/oauth2/AUTHORS
create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md
create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/oauth2/LICENSE
create mode 100644 vendor/golang.org/x/oauth2/README.md
create mode 100644 vendor/golang.org/x/oauth2/go.mod
create mode 100644 vendor/golang.org/x/oauth2/go.sum
create mode 100644 vendor/golang.org/x/oauth2/internal/client_appengine.go
create mode 100644 vendor/golang.org/x/oauth2/internal/doc.go
create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go
create mode 100644 vendor/golang.org/x/oauth2/internal/token.go
create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go
create mode 100644 vendor/golang.org/x/oauth2/oauth2.go
create mode 100644 vendor/golang.org/x/oauth2/token.go
create mode 100644 vendor/golang.org/x/oauth2/transport.go
create mode 100644 vendor/golang.org/x/sync/AUTHORS
create mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/sync/LICENSE
create mode 100644 vendor/golang.org/x/sync/PATENTS
create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go
create mode 100644 vendor/golang.org/x/sys/AUTHORS
create mode 100644 vendor/golang.org/x/sys/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/sys/LICENSE
create mode 100644 vendor/golang.org/x/sys/PATENTS
create mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
create mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go
create mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go
create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
create mode 100644 vendor/golang.org/x/sys/execabs/execabs.go
create mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
create mode 100644 vendor/golang.org/x/sys/plan9/asm.s
create mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_386.s
create mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s
create mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_arm.s
create mode 100644 vendor/golang.org/x/sys/plan9/const_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/dir_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/env_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/errors_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/mkall.sh
create mode 100644 vendor/golang.org/x/sys/plan9/mkerrors.sh
create mode 100644 vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh
create mode 100644 vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/pwd_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/race.go
create mode 100644 vendor/golang.org/x/sys/plan9/race0.go
create mode 100644 vendor/golang.org/x/sys/plan9/str.go
create mode 100644 vendor/golang.org/x/sys/plan9/syscall.go
create mode 100644 vendor/golang.org/x/sys/plan9/syscall_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go
create mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go
create mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go
create mode 100644 vendor/golang.org/x/sys/plan9/zsysnum_plan9.go
create mode 100644 vendor/golang.org/x/sys/unix/.gitignore
create mode 100644 vendor/golang.org/x/sys/unix/README.md
create mode 100644 vendor/golang.org/x/sys/unix/affinity_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/aliases.go
create mode 100644 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_386.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_386.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_386.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_386.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go
create mode 100644 vendor/golang.org/x/sys/unix/constants.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_freebsd.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/dirent.go
create mode 100644 vendor/golang.org/x/sys/unix/endian_big.go
create mode 100644 vendor/golang.org/x/sys/unix/endian_little.go
create mode 100644 vendor/golang.org/x/sys/unix/env_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/fcntl.go
create mode 100644 vendor/golang.org/x/sys/unix/fcntl_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
create mode 100644 vendor/golang.org/x/sys/unix/fdset.go
create mode 100644 vendor/golang.org/x/sys/unix/gccgo.go
create mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c
create mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ioctl.go
create mode 100644 vendor/golang.org/x/sys/unix/mkall.sh
create mode 100644 vendor/golang.org/x/sys/unix/mkerrors.sh
create mode 100644 vendor/golang.org/x/sys/unix/pagesize_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/pledge_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/ptrace_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/ptrace_ios.go
create mode 100644 vendor/golang.org/x/sys/unix/race.go
create mode 100644 vendor/golang.org/x/sys/unix/race0.go
create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdents.go
create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdirentries.go
create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go
create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go
create mode 100644 vendor/golang.org/x/sys/unix/str.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_illumos.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/unix/timestruct.go
create mode 100644 vendor/golang.org/x/sys/unix/unveil_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/xattr_bsd.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zptrace_x86_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
create mode 100644 vendor/golang.org/x/sys/windows/aliases.go
create mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/empty.s
create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go
create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash
create mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash
create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go
create mode 100644 vendor/golang.org/x/sys/windows/race.go
create mode 100644 vendor/golang.org/x/sys/windows/race0.go
create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/service.go
create mode 100644 vendor/golang.org/x/sys/windows/setupapierrors_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/str.go
create mode 100644 vendor/golang.org/x/sys/windows/syscall.go
create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go
create mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go
create mode 100644 vendor/golang.org/x/term/AUTHORS
create mode 100644 vendor/golang.org/x/term/CONTRIBUTING.md
create mode 100644 vendor/golang.org/x/term/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/term/LICENSE
create mode 100644 vendor/golang.org/x/term/PATENTS
create mode 100644 vendor/golang.org/x/term/README.md
create mode 100644 vendor/golang.org/x/term/go.mod
create mode 100644 vendor/golang.org/x/term/go.sum
create mode 100644 vendor/golang.org/x/term/term.go
create mode 100644 vendor/golang.org/x/term/term_plan9.go
create mode 100644 vendor/golang.org/x/term/term_solaris.go
create mode 100644 vendor/golang.org/x/term/term_unix.go
create mode 100644 vendor/golang.org/x/term/term_unix_aix.go
create mode 100644 vendor/golang.org/x/term/term_unix_bsd.go
create mode 100644 vendor/golang.org/x/term/term_unix_linux.go
create mode 100644 vendor/golang.org/x/term/term_unix_zos.go
create mode 100644 vendor/golang.org/x/term/term_unsupported.go
create mode 100644 vendor/golang.org/x/term/term_windows.go
create mode 100644 vendor/golang.org/x/term/terminal.go
create mode 100644 vendor/golang.org/x/text/AUTHORS
create mode 100644 vendor/golang.org/x/text/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/text/LICENSE
create mode 100644 vendor/golang.org/x/text/PATENTS
create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go
create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go
create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go
create mode 100644 vendor/golang.org/x/text/transform/transform.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/bidi.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/core.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/input.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go
create mode 100644 vendor/golang.org/x/text/width/kind_string.go
create mode 100644 vendor/golang.org/x/text/width/tables10.0.0.go
create mode 100644 vendor/golang.org/x/text/width/tables11.0.0.go
create mode 100644 vendor/golang.org/x/text/width/tables12.0.0.go
create mode 100644 vendor/golang.org/x/text/width/tables9.0.0.go
create mode 100644 vendor/golang.org/x/text/width/transform.go
create mode 100644 vendor/golang.org/x/text/width/trieval.go
create mode 100644 vendor/golang.org/x/text/width/width.go
create mode 100644 vendor/golang.org/x/time/AUTHORS
create mode 100644 vendor/golang.org/x/time/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/time/LICENSE
create mode 100644 vendor/golang.org/x/time/PATENTS
create mode 100644 vendor/golang.org/x/time/rate/rate.go
create mode 100644 vendor/golang.org/x/tools/AUTHORS
create mode 100644 vendor/golang.org/x/tools/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/tools/LICENSE
create mode 100644 vendor/golang.org/x/tools/PATENTS
create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports.go
create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/util.go
create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/importer.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go
create mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
create mode 100644 vendor/golang.org/x/tools/go/packages/doc.go
create mode 100644 vendor/golang.org/x/tools/go/packages/external.go
create mode 100644 vendor/golang.org/x/tools/go/packages/golist.go
create mode 100644 vendor/golang.org/x/tools/go/packages/golist_overlay.go
create mode 100644 vendor/golang.org/x/tools/go/packages/loadmode_string.go
create mode 100644 vendor/golang.org/x/tools/go/packages/packages.go
create mode 100644 vendor/golang.org/x/tools/go/packages/visit.go
create mode 100644 vendor/golang.org/x/tools/imports/forward.go
create mode 100644 vendor/golang.org/x/tools/internal/event/core/event.go
create mode 100644 vendor/golang.org/x/tools/internal/event/core/export.go
create mode 100644 vendor/golang.org/x/tools/internal/event/core/fast.go
create mode 100644 vendor/golang.org/x/tools/internal/event/doc.go
create mode 100644 vendor/golang.org/x/tools/internal/event/event.go
create mode 100644 vendor/golang.org/x/tools/internal/event/keys/keys.go
create mode 100644 vendor/golang.org/x/tools/internal/event/keys/standard.go
create mode 100644 vendor/golang.org/x/tools/internal/event/label/label.go
create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go
create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go
create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke.go
create mode 100644 vendor/golang.org/x/tools/internal/gocommand/vendor.go
create mode 100644 vendor/golang.org/x/tools/internal/gocommand/version.go
create mode 100644 vendor/golang.org/x/tools/internal/gopathwalk/walk.go
create mode 100644 vendor/golang.org/x/tools/internal/imports/fix.go
create mode 100644 vendor/golang.org/x/tools/internal/imports/imports.go
create mode 100644 vendor/golang.org/x/tools/internal/imports/mod.go
create mode 100644 vendor/golang.org/x/tools/internal/imports/mod_cache.go
create mode 100644 vendor/golang.org/x/tools/internal/imports/sortimports.go
create mode 100644 vendor/golang.org/x/tools/internal/imports/zstdlib.go
create mode 100644 vendor/golang.org/x/tools/internal/packagesinternal/packages.go
create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go
create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types.go
create mode 100644 vendor/golang.org/x/xerrors/LICENSE
create mode 100644 vendor/golang.org/x/xerrors/PATENTS
create mode 100644 vendor/golang.org/x/xerrors/README
create mode 100644 vendor/golang.org/x/xerrors/adaptor.go
create mode 100644 vendor/golang.org/x/xerrors/codereview.cfg
create mode 100644 vendor/golang.org/x/xerrors/doc.go
create mode 100644 vendor/golang.org/x/xerrors/errors.go
create mode 100644 vendor/golang.org/x/xerrors/fmt.go
create mode 100644 vendor/golang.org/x/xerrors/format.go
create mode 100644 vendor/golang.org/x/xerrors/frame.go
create mode 100644 vendor/golang.org/x/xerrors/go.mod
create mode 100644 vendor/golang.org/x/xerrors/internal/internal.go
create mode 100644 vendor/golang.org/x/xerrors/wrap.go
create mode 100644 vendor/gomodules.xyz/jsonpatch/v2/LICENSE
create mode 100644 vendor/gomodules.xyz/jsonpatch/v2/go.mod
create mode 100644 vendor/gomodules.xyz/jsonpatch/v2/go.sum
create mode 100644 vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go
create mode 100644 vendor/google.golang.org/appengine/LICENSE
create mode 100644 vendor/google.golang.org/appengine/internal/api.go
create mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go
create mode 100644 vendor/google.golang.org/appengine/internal/api_common.go
create mode 100644 vendor/google.golang.org/appengine/internal/app_id.go
create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto
create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
create mode 100644 vendor/google.golang.org/appengine/internal/identity.go
create mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go
create mode 100644 vendor/google.golang.org/appengine/internal/identity_flex.go
create mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go
create mode 100644 vendor/google.golang.org/appengine/internal/internal.go
create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto
create mode 100644 vendor/google.golang.org/appengine/internal/main.go
create mode 100644 vendor/google.golang.org/appengine/internal/main_common.go
create mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go
create mode 100644 vendor/google.golang.org/appengine/internal/metadata.go
create mode 100644 vendor/google.golang.org/appengine/internal/net.go
create mode 100644 vendor/google.golang.org/appengine/internal/regen.sh
create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
create mode 100644 vendor/google.golang.org/appengine/internal/transaction.go
create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
create mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go
create mode 100644 vendor/google.golang.org/protobuf/AUTHORS
create mode 100644 vendor/google.golang.org/protobuf/CONTRIBUTORS
create mode 100644 vendor/google.golang.org/protobuf/LICENSE
create mode 100644 vendor/google.golang.org/protobuf/PATENTS
create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go
create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go
create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go
create mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go
create mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
create mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go
create mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
create mode 100644 vendor/google.golang.org/protobuf/internal/errors/errors.go
create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go
create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/doc.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go
create mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go
create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go
create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genname/name.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/encode.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go
create mode 100644 vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go
create mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go
create mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go
create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go
create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
create mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go
create mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go
create mode 100644 vendor/google.golang.org/protobuf/proto/decode.go
create mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go
create mode 100644 vendor/google.golang.org/protobuf/proto/doc.go
create mode 100644 vendor/google.golang.org/protobuf/proto/encode.go
create mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go
create mode 100644 vendor/google.golang.org/protobuf/proto/equal.go
create mode 100644 vendor/google.golang.org/protobuf/proto/extension.go
create mode 100644 vendor/google.golang.org/protobuf/proto/merge.go
create mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go
create mode 100644 vendor/google.golang.org/protobuf/proto/proto.go
create mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go
create mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go
create mode 100644 vendor/google.golang.org/protobuf/proto/reset.go
create mode 100644 vendor/google.golang.org/protobuf/proto/size.go
create mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go
create mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go
create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go
create mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
create mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
create mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
create mode 100644 vendor/gopkg.in/inf.v0/LICENSE
create mode 100644 vendor/gopkg.in/inf.v0/dec.go
create mode 100644 vendor/gopkg.in/inf.v0/rounder.go
create mode 100644 vendor/gopkg.in/tomb.v1/LICENSE
create mode 100644 vendor/gopkg.in/tomb.v1/README.md
create mode 100644 vendor/gopkg.in/tomb.v1/tomb.go
create mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml
create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE
create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml
create mode 100644 vendor/gopkg.in/yaml.v2/NOTICE
create mode 100644 vendor/gopkg.in/yaml.v2/README.md
create mode 100644 vendor/gopkg.in/yaml.v2/apic.go
create mode 100644 vendor/gopkg.in/yaml.v2/decode.go
create mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go
create mode 100644 vendor/gopkg.in/yaml.v2/encode.go
create mode 100644 vendor/gopkg.in/yaml.v2/go.mod
create mode 100644 vendor/gopkg.in/yaml.v2/parserc.go
create mode 100644 vendor/gopkg.in/yaml.v2/readerc.go
create mode 100644 vendor/gopkg.in/yaml.v2/resolve.go
create mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go
create mode 100644 vendor/gopkg.in/yaml.v2/sorter.go
create mode 100644 vendor/gopkg.in/yaml.v2/writerc.go
create mode 100644 vendor/gopkg.in/yaml.v2/yaml.go
create mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go
create mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go
create mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml
create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE
create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE
create mode 100644 vendor/gopkg.in/yaml.v3/README.md
create mode 100644 vendor/gopkg.in/yaml.v3/apic.go
create mode 100644 vendor/gopkg.in/yaml.v3/decode.go
create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go
create mode 100644 vendor/gopkg.in/yaml.v3/encode.go
create mode 100644 vendor/gopkg.in/yaml.v3/go.mod
create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go
create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go
create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go
create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go
create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go
create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go
create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go
create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go
create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go
create mode 100644 vendor/k8s.io/api/LICENSE
create mode 100644 vendor/k8s.io/api/admission/v1/doc.go
create mode 100644 vendor/k8s.io/api/admission/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/admission/v1/generated.proto
create mode 100644 vendor/k8s.io/api/admission/v1/register.go
create mode 100644 vendor/k8s.io/api/admission/v1/types.go
create mode 100644 vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/admission/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/admission/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/admission/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/admission/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/admission/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1/doc.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1/generated.proto
create mode 100644 vendor/k8s.io/api/admissionregistration/v1/register.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1/types.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/apps/v1/doc.go
create mode 100644 vendor/k8s.io/api/apps/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/apps/v1/generated.proto
create mode 100644 vendor/k8s.io/api/apps/v1/register.go
create mode 100644 vendor/k8s.io/api/apps/v1/types.go
create mode 100644 vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/apps/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/apps/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/apps/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/apps/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/apps/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/apps/v1beta2/doc.go
create mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.pb.go
create mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.proto
create mode 100644 vendor/k8s.io/api/apps/v1beta2/register.go
create mode 100644 vendor/k8s.io/api/apps/v1beta2/types.go
create mode 100644 vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/authentication/v1/doc.go
create mode 100644 vendor/k8s.io/api/authentication/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/authentication/v1/generated.proto
create mode 100644 vendor/k8s.io/api/authentication/v1/register.go
create mode 100644 vendor/k8s.io/api/authentication/v1/types.go
create mode 100644 vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/authentication/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/authentication/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/authentication/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/authentication/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/authentication/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/authorization/v1/doc.go
create mode 100644 vendor/k8s.io/api/authorization/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/authorization/v1/generated.proto
create mode 100644 vendor/k8s.io/api/authorization/v1/register.go
create mode 100644 vendor/k8s.io/api/authorization/v1/types.go
create mode 100644 vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/authorization/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/authorization/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/authorization/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/authorization/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/authorization/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/autoscaling/v1/doc.go
create mode 100644 vendor/k8s.io/api/autoscaling/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/autoscaling/v1/generated.proto
create mode 100644 vendor/k8s.io/api/autoscaling/v1/register.go
create mode 100644 vendor/k8s.io/api/autoscaling/v1/types.go
create mode 100644 vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/doc.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/generated.proto
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/register.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/doc.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/register.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/types.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/batch/v1/doc.go
create mode 100644 vendor/k8s.io/api/batch/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/batch/v1/generated.proto
create mode 100644 vendor/k8s.io/api/batch/v1/register.go
create mode 100644 vendor/k8s.io/api/batch/v1/types.go
create mode 100644 vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/batch/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/batch/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/batch/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/batch/v2alpha1/doc.go
create mode 100644 vendor/k8s.io/api/batch/v2alpha1/generated.pb.go
create mode 100644 vendor/k8s.io/api/batch/v2alpha1/generated.proto
create mode 100644 vendor/k8s.io/api/batch/v2alpha1/register.go
create mode 100644 vendor/k8s.io/api/batch/v2alpha1/types.go
create mode 100644 vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/certificates/v1/doc.go
create mode 100644 vendor/k8s.io/api/certificates/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/certificates/v1/generated.proto
create mode 100644 vendor/k8s.io/api/certificates/v1/register.go
create mode 100644 vendor/k8s.io/api/certificates/v1/types.go
create mode 100644 vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/certificates/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/certificates/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/certificates/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/certificates/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/coordination/v1/doc.go
create mode 100644 vendor/k8s.io/api/coordination/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/coordination/v1/generated.proto
create mode 100644 vendor/k8s.io/api/coordination/v1/register.go
create mode 100644 vendor/k8s.io/api/coordination/v1/types.go
create mode 100644 vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/coordination/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/coordination/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/coordination/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/coordination/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/core/v1/annotation_key_constants.go
create mode 100644 vendor/k8s.io/api/core/v1/doc.go
create mode 100644 vendor/k8s.io/api/core/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/core/v1/generated.proto
create mode 100644 vendor/k8s.io/api/core/v1/lifecycle.go
create mode 100644 vendor/k8s.io/api/core/v1/objectreference.go
create mode 100644 vendor/k8s.io/api/core/v1/register.go
create mode 100644 vendor/k8s.io/api/core/v1/resource.go
create mode 100644 vendor/k8s.io/api/core/v1/taint.go
create mode 100644 vendor/k8s.io/api/core/v1/toleration.go
create mode 100644 vendor/k8s.io/api/core/v1/types.go
create mode 100644 vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/core/v1/well_known_labels.go
create mode 100644 vendor/k8s.io/api/core/v1/well_known_taints.go
create mode 100644 vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go
create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/generated.proto
create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/register.go
create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/types.go
create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go
create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/discovery/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/discovery/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/discovery/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/discovery/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/discovery/v1beta1/well_known_labels.go
create mode 100644 vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/discovery/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/events/v1/doc.go
create mode 100644 vendor/k8s.io/api/events/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/events/v1/generated.proto
create mode 100644 vendor/k8s.io/api/events/v1/register.go
create mode 100644 vendor/k8s.io/api/events/v1/types.go
create mode 100644 vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/events/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/events/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/events/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/events/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/events/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/events/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/events/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/extensions/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/extensions/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/extensions/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/extensions/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go
create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto
create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/register.go
create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/types.go
create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/networking/v1/doc.go
create mode 100644 vendor/k8s.io/api/networking/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/networking/v1/generated.proto
create mode 100644 vendor/k8s.io/api/networking/v1/register.go
create mode 100644 vendor/k8s.io/api/networking/v1/types.go
create mode 100644 vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/networking/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/networking/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/networking/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go
create mode 100644 vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/node/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.pb.go
create mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.proto
create mode 100644 vendor/k8s.io/api/node/v1alpha1/register.go
create mode 100644 vendor/k8s.io/api/node/v1alpha1/types.go
create mode 100644 vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/node/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/node/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/node/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/node/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/node/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/node/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/policy/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/policy/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/policy/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/rbac/v1/doc.go
create mode 100644 vendor/k8s.io/api/rbac/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/rbac/v1/generated.proto
create mode 100644 vendor/k8s.io/api/rbac/v1/register.go
create mode 100644 vendor/k8s.io/api/rbac/v1/types.go
create mode 100644 vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go
create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/generated.proto
create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/register.go
create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types.go
create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/rbac/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/rbac/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/rbac/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/rbac/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/rbac/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/rbac/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/scheduling/v1/doc.go
create mode 100644 vendor/k8s.io/api/scheduling/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/scheduling/v1/generated.proto
create mode 100644 vendor/k8s.io/api/scheduling/v1/register.go
create mode 100644 vendor/k8s.io/api/scheduling/v1/types.go
create mode 100644 vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go
create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.proto
create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/register.go
create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types.go
create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/settings/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.pb.go
create mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.proto
create mode 100644 vendor/k8s.io/api/settings/v1alpha1/register.go
create mode 100644 vendor/k8s.io/api/settings/v1alpha1/types.go
create mode 100644 vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/storage/v1/doc.go
create mode 100644 vendor/k8s.io/api/storage/v1/generated.pb.go
create mode 100644 vendor/k8s.io/api/storage/v1/generated.proto
create mode 100644 vendor/k8s.io/api/storage/v1/register.go
create mode 100644 vendor/k8s.io/api/storage/v1/types.go
create mode 100644 vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/storage/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.pb.go
create mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.proto
create mode 100644 vendor/k8s.io/api/storage/v1alpha1/register.go
create mode 100644 vendor/k8s.io/api/storage/v1alpha1/types.go
create mode 100644 vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/storage/v1beta1/doc.go
create mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/api/storage/v1beta1/register.go
create mode 100644 vendor/k8s.io/api/storage/v1beta1/types.go
create mode 100644 vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/LICENSE
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/.import-restrictions
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/apimachinery/LICENSE
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/errors.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/help.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/priority.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/amount.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/math.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/converter.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/helper.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/fields.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/requirements.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/selector.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/labels.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/selector.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/codec.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/conversion.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/converter.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/embedded.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/error.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/extension.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/helper.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/mapper.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/register.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/types.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/selection/operator.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/types/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/types/namespacedname.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/types/nodename.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/types/patch.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/types/uid.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/diff/diff.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/duration/duration.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/json/json.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/http.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/interface.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/port_range.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/port_split.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/util.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/rand/rand.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int32.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/string.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/version/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/version/helpers.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/version/types.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/doc.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/filter.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/mux.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/watch.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS
create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go
create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
create mode 100644 vendor/k8s.io/apiserver/LICENSE
create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/doc.go
create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/user.go
create mode 100644 vendor/k8s.io/autoscaler/LICENSE
create mode 100644 vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/doc.go
create mode 100644 vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/register.go
create mode 100644 vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/types.go
create mode 100644 vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/client-go/LICENSE
create mode 100644 vendor/k8s.io/client-go/discovery/discovery_client.go
create mode 100644 vendor/k8s.io/client-go/discovery/doc.go
create mode 100644 vendor/k8s.io/client-go/discovery/fake/discovery.go
create mode 100644 vendor/k8s.io/client-go/discovery/helper.go
create mode 100644 vendor/k8s.io/client-go/dynamic/interface.go
create mode 100644 vendor/k8s.io/client-go/dynamic/scheme.go
create mode 100644 vendor/k8s.io/client-go/dynamic/simple.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/clientset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/import.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/register.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go
create mode 100644 vendor/k8s.io/client-go/metadata/interface.go
create mode 100644 vendor/k8s.io/client-go/metadata/metadata.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go
create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/client-go/pkg/version/.gitattributes
create mode 100644 vendor/k8s.io/client-go/pkg/version/base.go
create mode 100644 vendor/k8s.io/client-go/pkg/version/def.bzl
create mode 100644 vendor/k8s.io/client-go/pkg/version/doc.go
create mode 100644 vendor/k8s.io/client-go/pkg/version/version.go
create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
create mode 100644 vendor/k8s.io/client-go/rest/OWNERS
create mode 100644 vendor/k8s.io/client-go/rest/client.go
create mode 100644 vendor/k8s.io/client-go/rest/config.go
create mode 100644 vendor/k8s.io/client-go/rest/plugin.go
create mode 100644 vendor/k8s.io/client-go/rest/request.go
create mode 100644 vendor/k8s.io/client-go/rest/transport.go
create mode 100644 vendor/k8s.io/client-go/rest/url_utils.go
create mode 100644 vendor/k8s.io/client-go/rest/urlbackoff.go
create mode 100644 vendor/k8s.io/client-go/rest/warnings.go
create mode 100644 vendor/k8s.io/client-go/rest/watch/decoder.go
create mode 100644 vendor/k8s.io/client-go/rest/watch/encoder.go
create mode 100644 vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/client-go/restmapper/category_expansion.go
create mode 100644 vendor/k8s.io/client-go/restmapper/discovery.go
create mode 100644 vendor/k8s.io/client-go/restmapper/shortcut.go
create mode 100644 vendor/k8s.io/client-go/testing/actions.go
create mode 100644 vendor/k8s.io/client-go/testing/fake.go
create mode 100644 vendor/k8s.io/client-go/testing/fixture.go
create mode 100644 vendor/k8s.io/client-go/tools/auth/OWNERS
create mode 100644 vendor/k8s.io/client-go/tools/auth/clientauth.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/OWNERS
create mode 100644 vendor/k8s.io/client-go/tools/cache/controller.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/delta_fifo.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/doc.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/expiration_cache.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/fifo.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/heap.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/index.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/listers.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/listwatch.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/mutation_cache.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/mutation_detector.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector_metrics.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/shared_informer.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/store.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/undelta_store.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/register.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/types.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/client_config.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/config.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/doc.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/flag.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/helpers.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/loader.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/overrides.go
create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/validation.go
create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/OWNERS
create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go
create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/metrics.go
create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go
create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go
create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go
create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/multilock.go
create mode 100644 vendor/k8s.io/client-go/tools/metrics/OWNERS
create mode 100644 vendor/k8s.io/client-go/tools/metrics/metrics.go
create mode 100644 vendor/k8s.io/client-go/tools/pager/pager.go
create mode 100644 vendor/k8s.io/client-go/tools/portforward/doc.go
create mode 100644 vendor/k8s.io/client-go/tools/portforward/portforward.go
create mode 100644 vendor/k8s.io/client-go/tools/record/OWNERS
create mode 100644 vendor/k8s.io/client-go/tools/record/doc.go
create mode 100644 vendor/k8s.io/client-go/tools/record/event.go
create mode 100644 vendor/k8s.io/client-go/tools/record/events_cache.go
create mode 100644 vendor/k8s.io/client-go/tools/record/fake.go
create mode 100644 vendor/k8s.io/client-go/tools/record/util/util.go
create mode 100644 vendor/k8s.io/client-go/tools/reference/ref.go
create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/doc.go
create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/errorstream.go
create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/reader.go
create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/resize.go
create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v1.go
create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v2.go
create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v3.go
create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v4.go
create mode 100644 vendor/k8s.io/client-go/transport/OWNERS
create mode 100644 vendor/k8s.io/client-go/transport/cache.go
create mode 100644 vendor/k8s.io/client-go/transport/cert_rotation.go
create mode 100644 vendor/k8s.io/client-go/transport/config.go
create mode 100644 vendor/k8s.io/client-go/transport/round_trippers.go
create mode 100644 vendor/k8s.io/client-go/transport/spdy/spdy.go
create mode 100644 vendor/k8s.io/client-go/transport/token_source.go
create mode 100644 vendor/k8s.io/client-go/transport/transport.go
create mode 100644 vendor/k8s.io/client-go/util/cert/OWNERS
create mode 100644 vendor/k8s.io/client-go/util/cert/cert.go
create mode 100644 vendor/k8s.io/client-go/util/cert/csr.go
create mode 100644 vendor/k8s.io/client-go/util/cert/io.go
create mode 100644 vendor/k8s.io/client-go/util/cert/pem.go
create mode 100644 vendor/k8s.io/client-go/util/cert/server_inspection.go
create mode 100644 vendor/k8s.io/client-go/util/connrotation/connrotation.go
create mode 100644 vendor/k8s.io/client-go/util/exec/exec.go
create mode 100644 vendor/k8s.io/client-go/util/flowcontrol/backoff.go
create mode 100644 vendor/k8s.io/client-go/util/flowcontrol/throttle.go
create mode 100644 vendor/k8s.io/client-go/util/homedir/homedir.go
create mode 100644 vendor/k8s.io/client-go/util/keyutil/OWNERS
create mode 100644 vendor/k8s.io/client-go/util/keyutil/key.go
create mode 100644 vendor/k8s.io/client-go/util/retry/OWNERS
create mode 100644 vendor/k8s.io/client-go/util/retry/util.go
create mode 100644 vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
create mode 100644 vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
create mode 100644 vendor/k8s.io/client-go/util/workqueue/doc.go
create mode 100644 vendor/k8s.io/client-go/util/workqueue/metrics.go
create mode 100644 vendor/k8s.io/client-go/util/workqueue/parallelizer.go
create mode 100644 vendor/k8s.io/client-go/util/workqueue/queue.go
create mode 100644 vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
create mode 100644 vendor/k8s.io/cluster-bootstrap/LICENSE
create mode 100644 vendor/k8s.io/cluster-bootstrap/token/api/doc.go
create mode 100644 vendor/k8s.io/cluster-bootstrap/token/api/types.go
create mode 100644 vendor/k8s.io/cluster-bootstrap/token/util/helpers.go
create mode 100644 vendor/k8s.io/code-generator/CONTRIBUTING.md
create mode 100644 vendor/k8s.io/code-generator/LICENSE
create mode 100644 vendor/k8s.io/code-generator/OWNERS
create mode 100644 vendor/k8s.io/code-generator/README.md
create mode 100644 vendor/k8s.io/code-generator/SECURITY_CONTACTS
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/OWNERS
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/README.md
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/args/args.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/args/gvtype.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/main.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/path/path.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go
create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/types/types.go
create mode 100644 vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go
create mode 100644 vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go
create mode 100644 vendor/k8s.io/code-generator/cmd/conversion-gen/main.go
create mode 100644 vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go
create mode 100644 vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go
create mode 100644 vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go
create mode 100644 vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go
create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore
create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS
create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go
create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go
create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go
create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go
create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go
create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go
create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go
create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go
create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo/main.go
create mode 100644 vendor/k8s.io/code-generator/cmd/import-boss/.gitignore
create mode 100644 vendor/k8s.io/code-generator/cmd/import-boss/README.md
create mode 100644 vendor/k8s.io/code-generator/cmd/import-boss/main.go
create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go
create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go
create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go
create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go
create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go
create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go
create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go
create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go
create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go
create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/main.go
create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go
create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go
create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go
create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/main.go
create mode 100644 vendor/k8s.io/code-generator/cmd/openapi-gen/main.go
create mode 100644 vendor/k8s.io/code-generator/cmd/register-gen/args/args.go
create mode 100644 vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go
create mode 100644 vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go
create mode 100644 vendor/k8s.io/code-generator/cmd/register-gen/main.go
create mode 100644 vendor/k8s.io/code-generator/cmd/set-gen/.gitignore
create mode 100644 vendor/k8s.io/code-generator/cmd/set-gen/main.go
create mode 100644 vendor/k8s.io/code-generator/code-of-conduct.md
create mode 100644 vendor/k8s.io/code-generator/generate-groups.sh
create mode 100644 vendor/k8s.io/code-generator/generate-internal-groups.sh
create mode 100644 vendor/k8s.io/code-generator/go.mod
create mode 100644 vendor/k8s.io/code-generator/go.sum
create mode 100644 vendor/k8s.io/code-generator/pkg/namer/tag-override.go
create mode 100644 vendor/k8s.io/code-generator/pkg/util/build.go
create mode 100644 vendor/k8s.io/code-generator/pkg/util/plural_exceptions.go
create mode 100644 vendor/k8s.io/code-generator/third_party/forked/golang/reflect/type.go
create mode 100644 vendor/k8s.io/code-generator/tools.go
create mode 100644 vendor/k8s.io/component-base/LICENSE
create mode 100644 vendor/k8s.io/component-base/config/OWNERS
create mode 100644 vendor/k8s.io/component-base/config/doc.go
create mode 100644 vendor/k8s.io/component-base/config/types.go
create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/conversion.go
create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/defaults.go
create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/doc.go
create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/register.go
create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/types.go
create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go
create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/component-base/config/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/gengo/LICENSE
create mode 100644 vendor/k8s.io/gengo/args/args.go
create mode 100644 vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go
create mode 100644 vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go
create mode 100644 vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go
create mode 100644 vendor/k8s.io/gengo/examples/set-gen/generators/sets.go
create mode 100644 vendor/k8s.io/gengo/examples/set-gen/generators/tags.go
create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/byte.go
create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/doc.go
create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/empty.go
create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/int.go
create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/int64.go
create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/string.go
create mode 100644 vendor/k8s.io/gengo/generator/default_generator.go
create mode 100644 vendor/k8s.io/gengo/generator/default_package.go
create mode 100644 vendor/k8s.io/gengo/generator/doc.go
create mode 100644 vendor/k8s.io/gengo/generator/error_tracker.go
create mode 100644 vendor/k8s.io/gengo/generator/execute.go
create mode 100644 vendor/k8s.io/gengo/generator/generator.go
create mode 100644 vendor/k8s.io/gengo/generator/import_tracker.go
create mode 100644 vendor/k8s.io/gengo/generator/snippet_writer.go
create mode 100644 vendor/k8s.io/gengo/generator/transitive_closure.go
create mode 100644 vendor/k8s.io/gengo/namer/doc.go
create mode 100644 vendor/k8s.io/gengo/namer/import_tracker.go
create mode 100644 vendor/k8s.io/gengo/namer/namer.go
create mode 100644 vendor/k8s.io/gengo/namer/order.go
create mode 100644 vendor/k8s.io/gengo/namer/plural_namer.go
create mode 100644 vendor/k8s.io/gengo/parser/doc.go
create mode 100644 vendor/k8s.io/gengo/parser/parse.go
create mode 100644 vendor/k8s.io/gengo/types/comments.go
create mode 100644 vendor/k8s.io/gengo/types/doc.go
create mode 100644 vendor/k8s.io/gengo/types/flatten.go
create mode 100644 vendor/k8s.io/gengo/types/types.go
create mode 100644 vendor/k8s.io/helm/LICENSE
create mode 100644 vendor/k8s.io/helm/pkg/chartutil/capabilities.go
create mode 100644 vendor/k8s.io/helm/pkg/chartutil/chartfile.go
create mode 100644 vendor/k8s.io/helm/pkg/chartutil/create.go
create mode 100644 vendor/k8s.io/helm/pkg/chartutil/doc.go
create mode 100644 vendor/k8s.io/helm/pkg/chartutil/expand.go
create mode 100644 vendor/k8s.io/helm/pkg/chartutil/files.go
create mode 100644 vendor/k8s.io/helm/pkg/chartutil/load.go
create mode 100644 vendor/k8s.io/helm/pkg/chartutil/requirements.go
create mode 100644 vendor/k8s.io/helm/pkg/chartutil/save.go
create mode 100644 vendor/k8s.io/helm/pkg/chartutil/transform.go
create mode 100644 vendor/k8s.io/helm/pkg/chartutil/values.go
create mode 100644 vendor/k8s.io/helm/pkg/engine/doc.go
create mode 100644 vendor/k8s.io/helm/pkg/engine/engine.go
create mode 100644 vendor/k8s.io/helm/pkg/ignore/doc.go
create mode 100644 vendor/k8s.io/helm/pkg/ignore/rules.go
create mode 100644 vendor/k8s.io/helm/pkg/manifest/doc.go
create mode 100644 vendor/k8s.io/helm/pkg/manifest/splitter.go
create mode 100644 vendor/k8s.io/helm/pkg/manifest/types.go
create mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/chart/chart.pb.go
create mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/chart/config.pb.go
create mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/chart/metadata.pb.go
create mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/chart/template.pb.go
create mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/release/hook.pb.go
create mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/release/info.pb.go
create mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/release/release.pb.go
create mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/release/status.pb.go
create mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/release/test_run.pb.go
create mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/release/test_suite.pb.go
create mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/version/version.pb.go
create mode 100644 vendor/k8s.io/helm/pkg/releaseutil/filter.go
create mode 100644 vendor/k8s.io/helm/pkg/releaseutil/manifest.go
create mode 100644 vendor/k8s.io/helm/pkg/releaseutil/sorter.go
create mode 100644 vendor/k8s.io/helm/pkg/sympath/walk.go
create mode 100644 vendor/k8s.io/helm/pkg/timeconv/doc.go
create mode 100644 vendor/k8s.io/helm/pkg/timeconv/timeconv.go
create mode 100644 vendor/k8s.io/helm/pkg/version/compatible.go
create mode 100644 vendor/k8s.io/helm/pkg/version/doc.go
create mode 100644 vendor/k8s.io/helm/pkg/version/version.go
create mode 100644 vendor/k8s.io/klog/.travis.yml
create mode 100644 vendor/k8s.io/klog/CONTRIBUTING.md
create mode 100644 vendor/k8s.io/klog/LICENSE
create mode 100644 vendor/k8s.io/klog/OWNERS
create mode 100644 vendor/k8s.io/klog/README.md
create mode 100644 vendor/k8s.io/klog/RELEASE.md
create mode 100644 vendor/k8s.io/klog/SECURITY_CONTACTS
create mode 100644 vendor/k8s.io/klog/code-of-conduct.md
create mode 100644 vendor/k8s.io/klog/go.mod
create mode 100644 vendor/k8s.io/klog/go.sum
create mode 100644 vendor/k8s.io/klog/klog.go
create mode 100644 vendor/k8s.io/klog/klog_file.go
create mode 100644 vendor/k8s.io/klog/v2/.gitignore
create mode 100644 vendor/k8s.io/klog/v2/CONTRIBUTING.md
create mode 100644 vendor/k8s.io/klog/v2/LICENSE
create mode 100644 vendor/k8s.io/klog/v2/OWNERS
create mode 100644 vendor/k8s.io/klog/v2/README.md
create mode 100644 vendor/k8s.io/klog/v2/RELEASE.md
create mode 100644 vendor/k8s.io/klog/v2/SECURITY_CONTACTS
create mode 100644 vendor/k8s.io/klog/v2/code-of-conduct.md
create mode 100644 vendor/k8s.io/klog/v2/go.mod
create mode 100644 vendor/k8s.io/klog/v2/go.sum
create mode 100644 vendor/k8s.io/klog/v2/klog.go
create mode 100644 vendor/k8s.io/klog/v2/klog_file.go
create mode 100644 vendor/k8s.io/kube-aggregator/LICENSE
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/defaults.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.pb.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/register.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.conversion.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.defaults.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/defaults.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.defaults.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/clientset.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/doc.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/doc.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/generated_expansion.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiregistration_client.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/doc.go
create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/generated_expansion.go
create mode 100644 vendor/k8s.io/kube-openapi/LICENSE
create mode 100644 vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go
create mode 100644 vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/common/common.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/common/doc.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/README.md
create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/config.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/extension.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/openapi.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS
create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/doc.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/union.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS
create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/sets/empty.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/sets/string.go
create mode 100644 vendor/k8s.io/utils/LICENSE
create mode 100644 vendor/k8s.io/utils/buffer/ring_growing.go
create mode 100644 vendor/k8s.io/utils/integer/integer.go
create mode 100644 vendor/k8s.io/utils/pointer/OWNERS
create mode 100644 vendor/k8s.io/utils/pointer/README.md
create mode 100644 vendor/k8s.io/utils/pointer/pointer.go
create mode 100644 vendor/k8s.io/utils/trace/README.md
create mode 100644 vendor/k8s.io/utils/trace/trace.go
create mode 100644 vendor/modules.txt
create mode 100644 vendor/sigs.k8s.io/cli-utils/LICENSE
create mode 100644 vendor/sigs.k8s.io/cli-utils/LICENSE_TEMPLATE
create mode 100644 vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/core.go
create mode 100644 vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/doc.go
create mode 100644 vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/generic.go
create mode 100644 vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/status.go
create mode 100644 vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/util.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/.gitignore
create mode 100644 vendor/sigs.k8s.io/controller-runtime/.golangci.yml
create mode 100644 vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md
create mode 100644 vendor/sigs.k8s.io/controller-runtime/FAQ.md
create mode 100644 vendor/sigs.k8s.io/controller-runtime/LICENSE
create mode 100644 vendor/sigs.k8s.io/controller-runtime/Makefile
create mode 100644 vendor/sigs.k8s.io/controller-runtime/OWNERS
create mode 100644 vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES
create mode 100644 vendor/sigs.k8s.io/controller-runtime/README.md
create mode 100644 vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS
create mode 100644 vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md
create mode 100644 vendor/sigs.k8s.io/controller-runtime/VERSIONING.md
create mode 100644 vendor/sigs.k8s.io/controller-runtime/alias.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md
create mode 100644 vendor/sigs.k8s.io/controller-runtime/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/go.mod
create mode 100644 vendor/sigs.k8s.io/controller-runtime/go.sum
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/flags.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/manager/client_builder.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/manager/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/predicate/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/LICENSE
create mode 100644 vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/conv.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/desc_visitor.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/flatten.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/known_types.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/crd.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/package.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/topology.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/parser.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/schema_visitor.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/spec.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/zz_generated.markerhelp.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/gen.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/traverse.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/zz_generated.markerhelp.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/help.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/print.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/table.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/sort.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/types.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/input.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/output.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/zz_generated.markerhelp.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/loader/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/loader/errors.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/loader/paths.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/loader/refs.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/loader/visit.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/doc.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/help.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/reg.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/regutil.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/zip.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/rbac/zz_generated.markerhelp.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/gen.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml/convert.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml/nested.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml/set.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/zz_generated.markerhelp.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/version/version.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/webhook/conv.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go
create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/webhook/zz_generated.markerhelp.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/allocator.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/doc.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/fields.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/list.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/listreflect.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/listunstructured.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/map.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/structreflect.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/valuereflect.go
create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/valueunstructured.go
create mode 100644 vendor/sigs.k8s.io/yaml/.gitignore
create mode 100644 vendor/sigs.k8s.io/yaml/.travis.yml
create mode 100644 vendor/sigs.k8s.io/yaml/CONTRIBUTING.md
create mode 100644 vendor/sigs.k8s.io/yaml/LICENSE
create mode 100644 vendor/sigs.k8s.io/yaml/OWNERS
create mode 100644 vendor/sigs.k8s.io/yaml/README.md
create mode 100644 vendor/sigs.k8s.io/yaml/RELEASE.md
create mode 100644 vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS
create mode 100644 vendor/sigs.k8s.io/yaml/code-of-conduct.md
create mode 100644 vendor/sigs.k8s.io/yaml/fields.go
create mode 100644 vendor/sigs.k8s.io/yaml/go.mod
create mode 100644 vendor/sigs.k8s.io/yaml/go.sum
create mode 100644 vendor/sigs.k8s.io/yaml/yaml.go
create mode 100644 vendor/sigs.k8s.io/yaml/yaml_go110.go
diff --git a/.ci/component_descriptor b/.ci/component_descriptor
new file mode 100755
index 0000000..e9d04cd
--- /dev/null
+++ b/.ci/component_descriptor
@@ -0,0 +1,86 @@
+#!/usr/bin/env python3
+
+import dataclasses
+import os
+import yaml
+
+import ci.util
+import gci.componentmodel
+import util
+
+
+component_descriptor_base_path = os.path.abspath(util.check_env('BASE_DEFINITION_PATH'))
+component_descriptor_path = os.path.abspath(util.check_env('COMPONENT_DESCRIPTOR_PATH'))
+repo_path = os.path.abspath(util.check_env('MAIN_REPO_DIR'))
+
+
+def parse_component_descriptor():
+ component_descriptor_v2 = gci.componentmodel.ComponentDescriptor.from_dict(
+ ci.util.parse_yaml_file(component_descriptor_base_path)
+ )
+
+ return component_descriptor_v2
+
+
+def add_image_dependency(component, image_name, image_reference, image_version):
+ resource_access = gci.componentmodel.OciAccess(
+ type=gci.componentmodel.AccessType.OCI_REGISTRY,
+ imageReference=image_reference,
+ )
+ component.resources.append(
+ gci.componentmodel.Resource(
+ name=image_name,
+ version=image_version,
+ type=gci.componentmodel.ResourceType.OCI_IMAGE,
+ access=resource_access,
+ ),
+ )
+
+
+def add_component_dependency(component, dependency_name, dependency_version):
+ component.componentReferences.append(
+ gci.componentmodel.ComponentReference(
+ name=dependency_name,
+ componentName=dependency_name,
+ version=dependency_version,
+ labels=[],
+ )
+ )
+
+
+component_descriptor = parse_component_descriptor()
+own_component = component_descriptor.component
+
+images_list_path = os.path.join(repo_path, 'charts', 'images.yaml')
+
+with open(images_list_path, 'r') as f:
+ images_list_contents = yaml.safe_load(f)
+
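+# Illustrative example of the expected structure of charts/images.yaml (the
+# values below are made up; only the keys 'name', 'sourceRepository',
+# 'repository' and 'tag' are read here):
+#
+#   images:
+#   - name: fleet-agent
+#     sourceRepository: github.com/rancher/fleet
+#     repository: rancher/fleet-agent
+#     tag: "0.3.5"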
+for image in images_list_contents.get('images', []):
+    # use the same heuristic as before: if the image's repository starts with
+    # 'eu.gcr.io/gardener-project', assume it's one of our components ...
+    # NOTE: Usually that is 'eu.gcr.io/gardener-project/gardener', but for this
+    # component (or rather: its dependencies) the image repository is
+    # different.
+ if image['repository'].startswith('eu.gcr.io/gardener-project'):
+ add_component_dependency(
+ component=own_component,
+ dependency_name=image['sourceRepository'],
+ dependency_version=image['tag'],
+ )
+ # ... otherwise assume it's an image dependency
+ else:
+ add_image_dependency(
+ component=own_component,
+ image_name=image['name'],
+ image_reference=image['repository'],
+ image_version=image['tag'],
+ )
+
+# write generated component descriptor back out
+with open(component_descriptor_path, 'w') as f:
+ yaml.dump(
+ data=dataclasses.asdict(component_descriptor),
+ Dumper=gci.componentmodel.EnumValueYamlDumper,
+ stream=f,
+ )
diff --git a/.ci/pipeline_definitions b/.ci/pipeline_definitions
new file mode 100644
index 0000000..13bf7b7
--- /dev/null
+++ b/.ci/pipeline_definitions
@@ -0,0 +1,46 @@
+gardener-extension-shoot-fleet-agent:
+ template: 'default'
+ base_definition:
+ repo: ~
+ traits:
+ version:
+ preprocess: 'inject-commit-hash'
+ publish:
+ dockerimages:
+ gardener-extension-shoot-fleet-agent:
+ registry: 'gcr-readwrite'
+ image: 'eu.gcr.io/gardener-project/gardener/extensions/shoot-fleet-agent'
+ dockerfile: 'Dockerfile'
+ target: gardener-extension-shoot-fleet-agent
+ jobs:
+ head-update:
+ traits:
+ component_descriptor: ~
+ draft_release: ~
+ options:
+ public_build_logs: true
+ pull-request:
+ traits:
+ pull-request: ~
+ component_descriptor: ~
+ options:
+ public_build_logs: true
+ release:
+ traits:
+ version:
+ preprocess: 'finalize'
+ release:
+ nextversion: 'bump_minor'
+ next_version_callback: '.ci/prepare_release'
+ release_callback: '.ci/prepare_release'
+ slack:
+ default_channel: 'internal_scp_workspace'
+ channel_cfgs:
+ internal_scp_workspace:
+ channel_name: 'C9CEBQPGE' #sap-tech-gardener
+ slack_cfg_name: 'scp_workspace'
+ component_descriptor: ~
+ publish:
+ dockerimages:
+ gardener-extension-shoot-fleet-agent:
+ tag_as_latest: true
diff --git a/.ci/prepare_release b/.ci/prepare_release
new file mode 100755
index 0000000..ceb0be1
--- /dev/null
+++ b/.ci/prepare_release
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+"$(dirname $0)"/../vendor/github.com/gardener/gardener/hack/.ci/prepare_release "$(dirname $0)"/.. github.com/gardener gardener-extension-shoot-fleet-agent
diff --git a/.ci/set_dependency_version b/.ci/set_dependency_version
new file mode 100755
index 0000000..9653082
--- /dev/null
+++ b/.ci/set_dependency_version
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+"$(dirname $0)"/../vendor/github.com/gardener/gardener/hack/.ci/set_dependency_version
diff --git a/.ci/verify b/.ci/verify
new file mode 100755
index 0000000..5f9e6c3
--- /dev/null
+++ b/.ci/verify
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+cd "$(dirname $0)/.."
+
+git config --global user.email "gardener@sap.com"
+git config --global user.name "Gardener CI/CD"
+
+# Required due to https://github.com/kubernetes/kubernetes/issues/86753 - can be removed once the issue is fixed.
+mkdir -p /go/src/github.com/gardener/gardener-extension-shoot-fleet-agent
+cp -r . /go/src/github.com/gardener/gardener-extension-shoot-fleet-agent
+cd /go/src/github.com/gardener/gardener-extension-shoot-fleet-agent
+
+make verify-extended
diff --git a/.circleci/config.yaml b/.circleci/config.yaml
new file mode 100644
index 0000000..7f2b56e
--- /dev/null
+++ b/.circleci/config.yaml
@@ -0,0 +1,39 @@
+version: 2
+jobs:
+ build:
+ docker: # run the steps with Docker
+
+ - image: circleci/golang:1.15
+ auth:
+ username: javamachr
+ password: $DOCKERHUB_PASSWORD
+ steps:
+ - checkout # check out source code to working directory
+
+ - restore_cache:
+ keys:
+ - go-mod-v4-{{ checksum "go.sum" }}
+
+ - run:
+ name: Install requirements
+ command: make install-requirements
+
+ - run:
+ name: Generate sources
+ command: make generate
+
+ - run:
+ name: Docker login
+ command: make docker-login
+
+ - run:
+ name: Build docker images
+ command: make docker-images
+
+ - run:
+ name: Push docker images
+ command: make docker-push
\ No newline at end of file
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..7599eb2
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,22 @@
+# Ignore everything
+**
+
+# Exclude folders relevant for build
+!charts/
+!cmd/
+!docs/
+!example/
+!hack/
+!pkg/
+!test/
+!tools/
+!vendor/
+
+!.gitignore
+!.golangci.yaml
+
+!go.mod
+!go.sum
+
+!VERSION
+!Makefile
diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md
new file mode 100644
index 0000000..9a28664
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug.md
@@ -0,0 +1,39 @@
+---
+name: Bug Report
+about: Report a bug encountered while working with this Gardener extension
+labels: kind/bug
+
+---
+
+**How to categorize this issue?**
+
+/area TODO
+/kind bug
+/priority normal
+
+**What happened**:
+
+**What you expected to happen**:
+
+**How to reproduce it (as minimally and precisely as possible)**:
+
+**Anything else we need to know?**:
+
+**Environment**:
+
+- Gardener version (if relevant):
+- Extension version:
+- Kubernetes version (use `kubectl version`):
+- Cloud provider or hardware configuration:
+- Others:
diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md
new file mode 100644
index 0000000..9b95fb6
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature.md
@@ -0,0 +1,27 @@
+---
+name: Enhancement Request
+about: Suggest an enhancement for this extension
+labels: kind/enhancement
+
+---
+
+**How to categorize this issue?**
+
+/area TODO
+/kind enhancement
+/priority normal
+
+**What would you like to be added**:
+
+**Why is this needed**:
diff --git a/.github/ISSUE_TEMPLATE/flaking-test.md b/.github/ISSUE_TEMPLATE/flaking-test.md
new file mode 100644
index 0000000..108e4a8
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/flaking-test.md
@@ -0,0 +1,35 @@
+---
+name: Flaking Test
+about: Report flaky tests or jobs in Gardener CI
+title: "[Flaky Test] FLAKING TEST/SUITE"
+labels: kind/flake
+
+---
+
+
+
+**How to categorize this issue?**
+
+/area testing
+/kind flake
+/priority normal
+
+**Which test(s)/suite(s) are flaking**:
+
+**CI link**:
+
+**Reason for failure**:
+
+**Anything else we need to know**:
+
diff --git a/.github/ISSUE_TEMPLATE/support.md b/.github/ISSUE_TEMPLATE/support.md
new file mode 100644
index 0000000..16ba43b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/support.md
@@ -0,0 +1,14 @@
+---
+name: Support Request
+about: Support request or question relating to this extension
+labels: kind/question
+
+---
+
+
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000..66a8b1b
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,40 @@
+**How to categorize this PR?**
+
+/area TODO
+/kind TODO
+/priority normal
+
+**What this PR does / why we need it**:
+
+**Which issue(s) this PR fixes**:
+Fixes #
+
+**Special notes for your reviewer**:
+
+**Release note**:
+
+```other operator
+
+```
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..8ec1ec3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,23 @@
+/.kube-secrets
+/tmp
+/dev
+./dev
+/local
+**/dev
+/bin
+
+*.coverprofile
+*.html
+.vscode
+.idea
+.DS_Store
+*~
+
+TODO
+
+# Virtual go & fuse
+.virtualgo
+.fuse_hidden*
+
+# Packr generated files
+*-packr.go
diff --git a/.golangci.yaml b/.golangci.yaml
new file mode 100644
index 0000000..d54e535
--- /dev/null
+++ b/.golangci.yaml
@@ -0,0 +1,7 @@
+run:
+ concurrency: 4
+ deadline: 10m
+
+linters:
+ disable:
+ - unused
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..55c92e5
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,13 @@
+############# builder
+FROM eu.gcr.io/gardener-project/3rd/golang:1.15.5 AS builder
+
+WORKDIR /go/src/github.com/gardener/gardener-extension-shoot-fleet-agent
+COPY . .
+RUN make install
+
+############# gardener-extension-shoot-fleet-agent
+FROM eu.gcr.io/gardener-project/3rd/alpine:3.12.3 AS gardener-extension-shoot-fleet-agent
+
+COPY charts /charts
+COPY --from=builder /go/bin/gardener-extension-shoot-fleet-agent /gardener-extension-shoot-fleet-agent
+ENTRYPOINT ["/gardener-extension-shoot-fleet-agent"]
diff --git a/LICENSE.md b/LICENSE.md
new file mode 100644
index 0000000..065c127
--- /dev/null
+++ b/LICENSE.md
@@ -0,0 +1,288 @@
+```
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+```
+
+## APIs
+
+This project may include APIs to SAP or third party products or services. The use of these APIs, products and services may be subject to additional agreements. In no event shall the application of the Apache Software License, v.2 to this project grant any rights in or to these APIs, products or services that would alter, expand, be inconsistent with, or supersede any terms of these additional agreements. API means application programming interfaces, as well as their respective specifications and implementing code that allows other software products to communicate with or call on SAP or third party products or services (for example, SAP Enterprise Services, BAPIs, Idocs, RFCs and ABAP calls or other user exits) and may be made available through SAP or third party products, SDKs, documentation or other media.
+
+## Subcomponents
+
+This project includes the following subcomponents that are subject to separate license terms.
+Your use of these subcomponents is subject to the separate license terms applicable to
+each subcomponent.
+
+Gardener.
+https://github.com/gardener/gardener.
+Copyright (c) 2019 SAP SE or an SAP affiliate company.
+Apache 2 license (https://github.com/gardener/gardener/blob/master/LICENSE.md).
+
+controller-runtime.
+https://sigs.k8s.io/controller-runtime.
+Copyright 2019 The Kubernetes Authors.
+Apache 2 license (https://sigs.k8s.io/controller-runtime/LICENSE).
+
+API.
+https://git.k8s.io/api.
+Copyright 2019 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/api/LICENSE).
+
+APIMachinery.
+https://git.k8s.io/apimachinery.
+Copyright 2019 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/apimachinery/LICENSE).
+
+Client-Go.
+https://git.k8s.io/client-go.
+Copyright 2017 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/client-go/LICENSE).
+
+YAML marshaling and unmarshalling support for Go.
+gopkg.in/yaml.v2.
+Copyright 2011-2016 Canonical Ltd.
+Apache 2 license (https://github.com/go-yaml/yaml/blob/v2/LICENSE)
+
+Packr.
+https://github.com/gobuffalo/packr
+Copyright (c) 2016 Mark Bates.
+MIT license (https://github.com/gobuffalo/packr/blob/master/LICENSE.txt)
+
+Cobra.
+https://github.com/spf13/cobra
+Copyright 2019 Steve Francia.
+Apache 2 license (https://github.com/spf13/cobra/blob/master/LICENSE.txt)
+
+Ginkgo.
+https://github.com/onsi/ginkgo.
+Copyright (c) 2013-2014 Onsi Fakhouri.
+MIT license (https://github.com/onsi/ginkgo/blob/master/LICENSE)
+
+Gomega.
+github.com/onsi/gomega.
+Copyright (c) 2013-2014 Onsi Fakhouri.
+MIT license (https://github.com/onsi/gomega/blob/master/LICENSE)
+
+------
+## MIT License
+
+ The MIT License (MIT)
+
+ Copyright (c)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..102e04f
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,120 @@
+# Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+EXTENSION_PREFIX := gardener-extension
+NAME := shoot-fleet-agent
+REGISTRY := javamachr
+IMAGE_PREFIX := $(REGISTRY)/gardener-extension
+REPO_ROOT := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+HACK_DIR := $(REPO_ROOT)/hack
+VERSION := $(shell cat "$(REPO_ROOT)/VERSION")
+LD_FLAGS := "-w -X github.com/javamachr/$(EXTENSION_PREFIX)-$(NAME)/pkg/version.Version=$(VERSION)"
+LEADER_ELECTION := false
+IGNORE_OPERATION_ANNOTATION := true
+
+#########################################
+# Rules for local development scenarios #
+#########################################
+
+.PHONY: start
+start:
+ @LEADER_ELECTION_NAMESPACE=garden GO111MODULE=on go run \
+ -mod=vendor \
+ -ldflags $(LD_FLAGS) \
+ ./cmd/$(EXTENSION_PREFIX)-$(NAME) \
+ --ignore-operation-annotation=$(IGNORE_OPERATION_ANNOTATION) \
+ --leader-election=$(LEADER_ELECTION) \
+ --kubeconfig ./dev/kubeconfig \
+ --config=./example/00-config.yaml
+
+#################################################################
+# Rules related to binary build, Docker image build and release #
+#################################################################
+
+.PHONY: install
+install:
+	@LD_FLAGS="-w -X github.com/javamachr/$(EXTENSION_PREFIX)-$(NAME)/pkg/version.Version=$(VERSION)" \
+ $(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/install.sh ./...
+
+.PHONY: docker-login
+docker-login:
+	@echo $$DOCKER_PASS | docker login -u $$DOCKER_USER --password-stdin
+
+.PHONY: docker-images
+docker-images:
+ @docker build -t $(IMAGE_PREFIX)-$(NAME):$(VERSION) -t $(IMAGE_PREFIX)-$(NAME):latest -f Dockerfile -m 6g --target $(EXTENSION_PREFIX)-$(NAME) .
+
+.PHONY: docker-push
+docker-push:
+ @docker push $(IMAGE_PREFIX)-$(NAME):$(VERSION)
+
+#####################################################################
+# Rules for verification, formatting, linting, testing and cleaning #
+#####################################################################
+
+.PHONY: install-requirements
+install-requirements:
+ @go install -mod=vendor $(REPO_ROOT)/vendor/github.com/ahmetb/gen-crd-api-reference-docs
+ @go install -mod=vendor $(REPO_ROOT)/vendor/github.com/gobuffalo/packr/v2/packr2
+ @go install -mod=vendor $(REPO_ROOT)/vendor/github.com/golang/mock/mockgen
+ @go install -mod=vendor $(REPO_ROOT)/vendor/github.com/onsi/ginkgo/ginkgo
+ @$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/install-requirements.sh
+
+.PHONY: revendor
+revendor:
+ @GO111MODULE=on go mod vendor
+ @GO111MODULE=on go mod tidy
+ @chmod +x $(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/*
+ @chmod +x $(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/.ci/*
+ @$(REPO_ROOT)/hack/update-github-templates.sh
+
+.PHONY: clean
+clean:
+ @$(shell find ./example -type f -name "controller-registration.yaml" -exec rm '{}' \;)
+ @$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/clean.sh ./cmd/... ./pkg/... ./test/...
+
+.PHONY: check-generate
+check-generate:
+ @$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/check-generate.sh $(REPO_ROOT)
+
+.PHONY: check
+check:
+ @$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/check.sh --golangci-lint-config=./.golangci.yaml ./cmd/... ./pkg/... ./test/...
+ @$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/check-charts.sh ./charts
+
+.PHONY: generate
+generate:
+ @$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/generate.sh ./charts/... ./cmd/... ./pkg/... ./test/...
+	@rm -rf ./pkg/client/fleet/clientset/internalversion
+
+.PHONY: format
+format:
+ @$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/format.sh ./cmd ./pkg ./test
+
+.PHONY: test
+test:
+ @SKIP_FETCH_TOOLS=1 $(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/test.sh ./cmd/... ./pkg/...
+
+.PHONY: test-cov
+test-cov:
+ @SKIP_FETCH_TOOLS=1 $(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/test-cover.sh ./cmd/... ./pkg/...
+
+.PHONY: test-clean
+test-clean:
+ @$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/test-cover-clean.sh
+
+.PHONY: verify
+verify: check format test
+
+.PHONY: verify-extended
+verify-extended: install-requirements check-generate check format test-cov test-clean
diff --git a/NOTICE.md b/NOTICE.md
new file mode 100644
index 0000000..4bbe7e8
--- /dev/null
+++ b/NOTICE.md
@@ -0,0 +1,15 @@
+## Gardener Extensions
+Copyright (c) 2017-2019 SAP SE or an SAP affiliate company. All rights reserved.
+
+## Seed Source
+
+The source code of this component was seeded based on a copy of the following files from [github.com/kubernetes-sigs](https://github.com/kubernetes-sigs):
+
+controller-runtime.
+https://sigs.k8s.io/controller-runtime.
+Copyright 2018 The Kubernetes Authors.
+Apache 2 license (https://sigs.k8s.io/controller-runtime/LICENSE).
+
+Version: 0.1.9.
+Commit-ID: f6f0bc9611363b43664d08fb097ab13243ef621d
+Commit-Message: Merge pull request #263 from DirectXMan12/release/v0.1.9
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..bf2574c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,59 @@
+# [Gardener Extension for Fleet agent installation](https://gardener.cloud)
+
+[![CI Build status](https://concourse.ci.gardener.cloud/api/v1/teams/gardener/pipelines/gardener-extension-shoot-fleet-agent-master/jobs/master-head-update-job/badge)](https://concourse.ci.gardener.cloud/teams/gardener/pipelines/gardener-extension-shoot-fleet-agent-master/jobs/master-head-update-job)
+[![Go Report Card](https://goreportcard.com/badge/github.com/javamachr/gardener-extension-shoot-fleet-agent)](https://goreportcard.com/report/github.com/javamachr/gardener-extension-shoot-fleet-agent)
+
+Project Gardener implements the automated management and operation of [Kubernetes](https://kubernetes.io/) clusters as a service. Its main principle is to leverage Kubernetes concepts for all of its tasks.
+
+Recently, most of the vendor specific logic has been developed [in-tree](https://github.com/gardener/gardener). However, the project has grown to a size where it is very hard to extend, maintain, and test. With [GEP-1](https://github.com/gardener/gardener/blob/master/docs/proposals/01-extensibility.md) we have proposed how the architecture can be changed in a way to support external controllers that contain their very own vendor specifics. This way, we can keep Gardener core clean and independent.
+
+## Configuration
+
+Example configuration for this extension controller:
+
+```yaml
+apiVersion: shoot-fleet-agent-service.extensions.config.gardener.cloud/v1alpha1
+kind: Configuration
+clientConnection:
+  kubeconfig: # base64-encoded kubeconfig of the cluster running the Fleet manager
+  labels: # extra labels to apply to the Cluster registration
+ env: dev
+```
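+
+When the extension is deployed through the Helm chart contained in this repository, the same settings are supplied via the chart's `fleetManager` values (a minimal sketch; the label is illustrative):
+
+```yaml
+fleetManager:
+  # base64-encoded kubeconfig of the Fleet management cluster
+  kubeconfig: <base64-encoded kubeconfig>
+  # optional labels added to the Cluster registration
+  labels:
+    env: dev
+```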
+
+## Extension-Resources
+
+Example extension resource:
+
+```yaml
+apiVersion: extensions.gardener.cloud/v1alpha1
+kind: Extension
+metadata:
+ name: "extension-shoot-fleet-agent"
+ namespace: shoot--project--abc
+spec:
+ type: shoot-fleet-agent
+```
+
+When an extension resource is reconciled, the extension controller registers the Shoot cluster in the Fleet management cluster (configured via the kubeconfig in the Configuration object above).
+
+Please note that this extension controller relies on an existing, properly configured [Fleet multi-cluster deployment](https://fleet.rancher.io/multi-cluster-install/).
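+
+As a quick verification (assuming `kubectl` access to the Fleet management cluster), you can list the registered Fleet `Cluster` resources there; the exact namespace (for example `clusters`) depends on your Fleet multi-cluster setup:
+
+```bash
+# run against the Fleet management cluster, not the Shoot
+kubectl get clusters.fleet.cattle.io -A
+```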
+
+## How to start using or developing this extension controller locally
+
+You can run the controller locally on your machine by executing `make start`. Please make sure the kubeconfig for the cluster you want to connect to is available at `./dev/kubeconfig`, as shown in the sketch below.
+Static code checks and tests can be executed by running `VERIFY=true make all`. We use Go modules for dependency management and [Ginkgo](https://github.com/onsi/ginkgo)/[Gomega](https://github.com/onsi/gomega) for testing.
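+
+For example, a typical local development run could look like this (a sketch; the kubeconfig path and config file follow the Makefile's `start` target):
+
+```bash
+# place the kubeconfig of the cluster you want to connect to where `make start` expects it
+mkdir -p dev
+cp /path/to/your/kubeconfig dev/kubeconfig
+
+# runs the controller locally with the settings from example/00-config.yaml
+make start
+```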
+
+## Feedback and Support
+
+Feedback and contributions are always welcome. Please report bugs or suggestions as [GitHub issues](https://github.com/javamachr/gardener-extension-shoot-fleet-agent/issues) or join our [Slack channel #gardener](https://kubernetes.slack.com/messages/gardener) (please invite yourself to the Kubernetes workspace [here](http://slack.k8s.io)).
+
+## Learn more!
+
+Please find further resources about our project here:
+
+* [Our landing page gardener.cloud](https://gardener.cloud/)
+* ["Gardener, the Kubernetes Botanist" blog on kubernetes.io](https://kubernetes.io/blog/2018/05/17/gardener/)
+* ["Gardener Project Update" blog on kubernetes.io](https://kubernetes.io/blog/2019/12/02/gardener-project-update/)
+* [Gardener Extensions Golang library](https://godoc.org/github.com/gardener/gardener/extensions/pkg)
+* [GEP-1 (Gardener Enhancement Proposal) on extensibility](https://github.com/gardener/gardener/blob/master/docs/proposals/01-extensibility.md)
+* [Extensibility API documentation](https://github.com/gardener/gardener/tree/master/docs/extensions)
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..74a91a8
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+v1.0.0-dev
diff --git a/charts/gardener-extension-shoot-cert-service/.helmignore b/charts/gardener-extension-shoot-cert-service/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/gardener-extension-shoot-cert-service/Chart.yaml b/charts/gardener-extension-shoot-cert-service/Chart.yaml
new file mode 100644
index 0000000..621a2df
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for the Gardener Shoot Fleet Agent extension.
+name: gardener-extension-shoot-fleet-agent
+version: 0.1.0
diff --git a/charts/gardener-extension-shoot-cert-service/doc.go b/charts/gardener-extension-shoot-cert-service/doc.go
new file mode 100644
index 0000000..c4b10b7
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/doc.go
@@ -0,0 +1,18 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate ../../vendor/github.com/gardener/gardener/hack/generate-controller-registration.sh extension-shoot-fleet-agent . ../../VERSION ../../example/controller-registration.yaml Extension:shoot-fleet-agent
+
+// Package chart enables go:generate support for generating the correct controller registration.
+package chart
diff --git a/charts/gardener-extension-shoot-cert-service/templates/_helpers.tpl b/charts/gardener-extension-shoot-cert-service/templates/_helpers.tpl
new file mode 100644
index 0000000..cc224e0
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/templates/_helpers.tpl
@@ -0,0 +1,36 @@
+{{- define "name" -}}
+gardener-extension-shoot-fleet-agent
+{{- end -}}
+
+{{- define "agentconfig" -}}
+---
+apiVersion: shoot-fleet-agent-service.extensions.config.gardener.cloud/v1alpha1
+kind: FleetAgentConfig
+clientConnection:
+ kubeconfig: {{ .Values.fleetManager.kubeconfig }}
+{{- if .Values.fleetManager.labels }}
+labels: {{ .Values.fleetManager.labels | toYaml }}
+{{- end }}
+{{- end }}
+
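+{{/* "image" renders the container image reference: a tag starting with
+     "sha256:" is treated as a digest (repository@digest), any other value as a
+     regular tag (repository:tag). */}}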
+{{- define "image" -}}
+ {{- if hasPrefix "sha256:" .Values.image.tag }}
+ {{- printf "%s@%s" .Values.image.repository .Values.image.tag }}
+ {{- else }}
+ {{- printf "%s:%s" .Values.image.repository .Values.image.tag }}
+ {{- end }}
+{{- end }}
+
+{{- define "priorityclassversion" -}}
+{{- if semverCompare ">= 1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+scheduling.k8s.io/v1
+{{- else if semverCompare ">= 1.11-0" .Capabilities.KubeVersion.GitVersion -}}
+scheduling.k8s.io/v1beta1
+{{- else -}}
+scheduling.k8s.io/v1alpha1
+{{- end -}}
+{{- end -}}
+
+{{- define "leaderelectionid" -}}
+extension-shoot-fleet-agent-leader-election
+{{- end -}}
\ No newline at end of file
diff --git a/charts/gardener-extension-shoot-cert-service/templates/configmap-imagevector-overwrite.yaml b/charts/gardener-extension-shoot-cert-service/templates/configmap-imagevector-overwrite.yaml
new file mode 100644
index 0000000..a4529dd
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/templates/configmap-imagevector-overwrite.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.imageVectorOverwrite }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "name" . }}-imagevector-overwrite
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app.kubernetes.io/name: {{ include "name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+data:
+ images_overwrite.yaml: |
+{{ .Values.imageVectorOverwrite | indent 4 }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/gardener-extension-shoot-cert-service/templates/deployment.yaml b/charts/gardener-extension-shoot-cert-service/templates/deployment.yaml
new file mode 100644
index 0000000..fa64e60
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/templates/deployment.yaml
@@ -0,0 +1,74 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: gardener-extension-shoot-fleet-agent
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app.kubernetes.io/name: gardener-extension-shoot-fleet-agent
+ helm.sh/chart: gardener-extension-shoot-fleet-agent
+ app.kubernetes.io/instance: {{ .Release.Name }}
+spec:
+ revisionHistoryLimit: 0
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: gardener-extension-shoot-fleet-agent
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ template:
+ metadata:
+ annotations:
+ checksum/secret-fleet-service-config: {{ include "agentconfig" . | sha256sum }}
+ {{- if .Values.imageVectorOverwrite }}
+ checksum/configmap-extension-imagevector-overwrite: {{ include (print $.Template.BasePath "/configmap-imagevector-overwrite.yaml") . | sha256sum }}
+ {{- end }}
+ labels:
+ app.kubernetes.io/name: gardener-extension-shoot-fleet-agent
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ spec:
+ priorityClassName: gardener-extension-shoot-fleet-agent
+ serviceAccountName: gardener-extension-shoot-fleet-agent
+ containers:
+ - name: gardener-extension-shoot-fleet-agent
+ image: {{ include "image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - /gardener-extension-shoot-fleet-agent
+ - --config=/etc/fleet-service/config.yaml
+ - --max-concurrent-reconciles={{ .Values.controllers.concurrentSyncs }}
+ - --healthcheck-max-concurrent-reconciles={{ .Values.controllers.healthcheck.concurrentSyncs }}
+ - --disable-controllers={{ .Values.disableControllers | join "," }}
+ - --ignore-operation-annotation={{ .Values.controllers.ignoreOperationAnnotation }}
+ - --leader-election-id={{ include "leaderelectionid" . }}
+ env:
+ - name: LEADER_ELECTION_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.imageVectorOverwrite }}
+ - name: IMAGEVECTOR_OVERWRITE
+ value: /charts_overwrite/images_overwrite.yaml
+ {{- end }}
+ {{- if .Values.resources }}
+ resources:
+{{ toYaml .Values.resources | trim | indent 10 }}
+ {{- end }}
+ volumeMounts:
+ - name: fleet-service-config
+ mountPath: /etc/fleet-service
+ readOnly: true
+ {{- if .Values.imageVectorOverwrite }}
+ - name: extension-imagevector-overwrite
+ mountPath: /charts_overwrite/
+ readOnly: true
+ {{- end }}
+ volumes:
+ - name: fleet-service-config
+ secret:
+ secretName: extension-shoot-fleet-agent-service.config
+ {{- if .Values.imageVectorOverwrite }}
+ - name: extension-imagevector-overwrite
+ configMap:
+ name: {{ include "name" .}}-imagevector-overwrite
+ defaultMode: 420
+ {{- end }}
diff --git a/charts/gardener-extension-shoot-cert-service/templates/fleet-config.yaml b/charts/gardener-extension-shoot-cert-service/templates/fleet-config.yaml
new file mode 100644
index 0000000..19fef90
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/templates/fleet-config.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: extension-shoot-fleet-agent-service.config
+ namespace: {{ .Release.Namespace }}
+data:
+ config.yaml: {{ include "agentconfig" . | b64enc | trim }}
\ No newline at end of file
diff --git a/charts/gardener-extension-shoot-cert-service/templates/priorityclass.yaml b/charts/gardener-extension-shoot-cert-service/templates/priorityclass.yaml
new file mode 100644
index 0000000..e543862
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/templates/priorityclass.yaml
@@ -0,0 +1,7 @@
+apiVersion: {{ include "priorityclassversion" . }}
+kind: PriorityClass
+metadata:
+ name: gardener-extension-shoot-fleet-agent
+value: 1000000000
+globalDefault: true
+description: "Priority class for the Gardener extension: shoot-fleet-agent."
\ No newline at end of file
diff --git a/charts/gardener-extension-shoot-cert-service/templates/rbac-shoot-cert-service.yaml b/charts/gardener-extension-shoot-cert-service/templates/rbac-shoot-cert-service.yaml
new file mode 100644
index 0000000..e990f88
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/templates/rbac-shoot-cert-service.yaml
@@ -0,0 +1,16 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: extensions.gardener.cloud:extension-shoot-fleet-agent
+ labels:
+ app.kubernetes.io/name: gardener-extension-shoot-fleet-agent
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - "secrets"
+ verbs:
+ - "*"
diff --git a/charts/gardener-extension-shoot-cert-service/templates/rbac.yaml b/charts/gardener-extension-shoot-cert-service/templates/rbac.yaml
new file mode 100644
index 0000000..23e60d9
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/templates/rbac.yaml
@@ -0,0 +1,113 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: gardener-extension-shoot-fleet-agent
+ labels:
+ app.kubernetes.io/name: gardener-extension-shoot-fleet-agent
+ helm.sh/chart: gardener-extension-shoot-fleet-agent
+ app.kubernetes.io/instance: {{ .Release.Name }}
+rules:
+- apiGroups:
+ - extensions.gardener.cloud
+ resources:
+ - clusters
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions.gardener.cloud
+ resources:
+ - extensions
+ - extensions/status
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - create
+ - update
+ - patch
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - clusterroles
+ - clusterrolebindings
+ - roles
+ - rolebindings
+ verbs:
+ - get
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - "configmaps"
+ - "secrets"
+ - "events"
+ - "services"
+ - "pods"
+ - "serviceaccounts"
+ verbs:
+ - "*"
+- apiGroups:
+ - "apps"
+ resources:
+ - "deployments"
+ verbs:
+ - get
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ resourceNames:
+ - {{ include "leaderelectionid" . }}
+ verbs:
+ - update
+ - get
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: gardener-extension-shoot-fleet-agent
+ labels:
+ app.kubernetes.io/name: gardener-extension-shoot-fleet-agent
+ helm.sh/chart: gardener-extension-shoot-fleet-agent
+ app.kubernetes.io/instance: {{ .Release.Name }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: gardener-extension-shoot-fleet-agent
+subjects:
+- kind: ServiceAccount
+ name: gardener-extension-shoot-fleet-agent
+ namespace: {{ .Release.Namespace }}
diff --git a/charts/gardener-extension-shoot-cert-service/templates/serviceaccount.yaml b/charts/gardener-extension-shoot-cert-service/templates/serviceaccount.yaml
new file mode 100644
index 0000000..8e5759e
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/templates/serviceaccount.yaml
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: gardener-extension-shoot-fleet-agent
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app.kubernetes.io/name: gardener-extension-shoot-fleet-agent
+ helm.sh/chart: gardener-extension-shoot-fleet-agent
+ app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/charts/gardener-extension-shoot-cert-service/templates/vpa.yaml b/charts/gardener-extension-shoot-cert-service/templates/vpa.yaml
new file mode 100644
index 0000000..d949668
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/templates/vpa.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.vpa.enabled}}
+apiVersion: "autoscaling.k8s.io/v1beta2"
+kind: VerticalPodAutoscaler
+metadata:
+ name: gardener-extension-shoot-fleet-agent-vpa
+ namespace: {{ .Release.Namespace }}
+spec:
+ {{- if .Values.vpa.resourcePolicy }}
+ resourcePolicy:
+ containerPolicies:
+ - containerName: '*'
+ minAllowed:
+ cpu: {{ required ".Values.vpa.resourcePolicy.minAllowed.cpu is required" .Values.vpa.resourcePolicy.minAllowed.cpu }}
+ memory: {{ required ".Values.vpa.resourcePolicy.minAllowed.memory is required" .Values.vpa.resourcePolicy.minAllowed.memory }}
+ {{- end }}
+ targetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: gardener-extension-shoot-fleet-agent
+ updatePolicy:
+ updateMode: {{ .Values.vpa.updatePolicy.updateMode }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/gardener-extension-shoot-cert-service/values.yaml b/charts/gardener-extension-shoot-cert-service/values.yaml
new file mode 100644
index 0000000..1a74f51
--- /dev/null
+++ b/charts/gardener-extension-shoot-cert-service/values.yaml
@@ -0,0 +1,40 @@
+image:
+ repository: javamachr/gardener-extension-shoot-fleet-agent
+ tag: latest
+ pullPolicy: Always
+
+resources:
+ requests:
+ cpu: "50m"
+ memory: "64Mi"
+ limits:
+ cpu: "50m"
+ memory: "128Mi"
+
+vpa:
+ enabled: true
+ # resourcePolicy:
+ # minAllowed:
+ # cpu: 100m
+ # memory: 128Mi
+ updatePolicy:
+ updateMode: "Auto"
+
+controllers:
+ concurrentSyncs: 1
+ ignoreOperationAnnotation: false
+ healthcheck:
+ concurrentSyncs: 1
+
+fleetManager:
+ kubeconfig: YXBpVmVyc2lvbjogdjEKY2x1c3RlcnM6Ci0gY2x1c3RlcjoKICAgIGNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhOiBMUzB0TFMxQ1JVZEpUaUJEUlZKVVNVWkpRMEZVUlMwdExTMHRDazFKU1VVMlJFTkRRWFJEWjBGM1NVSkJaMGxSVWtsYWVFOXZaMmQxYW1aSlYwbHVhR3RGUlhSRGVrRk9RbWRyY1docmFVYzVkekJDUVZGelJrRkVRVTRLVFZGemQwTlJXVVJXVVZGRVJYZEthbGxVUVdkR2R6QjVUVlJCZUUxVVRYaE5la2t3VFhwR1lVZEJPSGxOUkZWNFRVUkZlRTE2UlhwTmVsRjZUVlp2ZHdwRVZFVk1UVUZyUjBFeFZVVkJlRTFEV1RKRmQyZG5TV2xOUVRCSFExTnhSMU5KWWpORVVVVkNRVkZWUVVFMFNVTkVkMEYzWjJkSlMwRnZTVU5CVVVOM0NtNUhibU53TDB4Vk9XSjNhek56SzNReVJqZHpNRlJhWlRaTFJVRkZVMjVWZFZCTE4xTkROMHQwY0d0M1JuRnBiR3hKV1RSdWRXeG1UQzgxYmxSMU1UQUtjRUZ2YVhGbWRHRnVabVJQUTJKWlMyTkdSR3Q0TldsNU0wMDVaSFp4WmtwMWEwSmpSR3hMT0VaM1RtbzVPVVpDYjJwa1dtMXpjVWw0U0VOeVRqRm9SUXBoYzJVM2VHUXphamxJTUZWa1VUVlFSek4wTTNFeVZHOVRURTVoZVZoVVdIWTBNRlZ3VlhacVowRnJibXMwYmtGS01rRlphWFZWWlZwNGEyeDVOMkp0Q2s1NGRuRmtTR0l5UVV3ck5uSmpSREZPUkVoWWNraFNiRVJLWTFsalZHTkJXVWxEY0hWQ1JVSjJRVXBzT1c0eVFXdFBSM0p4YmtJeUszVmtSRFZZWVcwS2JscDVNWGx5U1daNWVFaFdVVk5TZEhkRlZHVkJiakZrY3pSS1pWQkRjMXBUZUdKVFZIRjZkamRQZDFVelJuWm1SekpSSzJWVFVGVmlSa3BHUWtaTmJBcFBZaTh3TDNobldqWXZWVGwwWTJaQ2F6WjZkbVZCUWxJeldHMUpNa3RSUW5KUGIwWndhMnBHTUdvek9WTXZWRmxCWkVKM1pXRnpjak5tY1ZRelJsWnNDbWhJUTNWdFYxUlFZVlIzWjBzM1NVUXhVbTlXY1dSa1FraE5ZbFpYUVd3MldGZzRabU15Ym1scEwwcFdkV1JqZWsxVmJtVlZMM0I2U1V4b2N6aFJPV01LYTAxeU1XZEVXRGxSWXpCQmNtbFVVRzFXYlRGbGFVSmFRekJXU25oU2NraHFOSE56ZVdkVWFWUllPVXRRYmxKMFdsRmhNVXhRTXpsdmVrSXJaa1pITWdwMmRFWnphMVJaY0hKTWFuZHZjM1pWTkdnNWFFRnNVbVpWWVdwb0sySkVla0U1THpaaFVVTXljVnBXVjJ0aWNtbGtRMU5GVEdKNUt6aG9hVGx5VFRWekNtMTZVQzkxU2pCSmRHdDNkWE5wVDJNeWRuUlROWE5wZUhNelNHaG9WRVUxWkc1RlJ6aENaa05RTWtsaFZqUktORFEyY1ZoMmNFUlZXbTA1U3pWMmRYUUtjbUZyZGtKUEwydFZSRmhTV1RKM1EwcE1lbmx6WVhjMVVYTTFVbUpTTkdZdlZEVnJlalpEZURSM1NVUkJVVUZDYnpCSmQxRkVRVTlDWjA1V1NGRTRRZ3BCWmpoRlFrRk5RMEZ4VVhkRWQxbEVWbEl3VkVGUlNDOUNRVlYzUVhkRlFpOTZRV1JDWjA1V1NGRTBSVVpuVVZVeEsxY3lNRWxPYkZvelYxQk5UVVpZQ2s1SU5taExhUzl2YTNwamQwUlJXVXBMYjFwSmFIWmpUa0ZSUlV4Q1VVRkVaMmRKUWtGQlJXeFlhREpCSzJjeWJHSTJNMUY0ZHlzdk5WazVWMWRpZVdNS1FrMVFMemhVUVdaMlkzcDJRM0J4V25scFQwaEpSMjV0U21KNVFuaHljbm8wYUROT2RrVnZkMlEyYTNWeE9UUkpkakoyWnpkc1JHZ3lSMDFZUTB4cGJnbzBjazUwZGt0NVZWTTFVMGMxVTNWTGNVZElWMU5CUVdZdkswZzFUbXhYTVhnMFRIQjNZbXNyVGk5V1dXTnZjRXRyTDNoYVpHbHplak5KTDNSeFJHbFpDbWhXVW5aQ1ZqQjNOMkkzWVhScVYyeGplV0V6WTNBclMybFpNVEZ6UlhSSFkyRjZXR1pwVEU1UmEzUTRObTA0Y0V0TmVtRlpVVmd4YkZvd01qWXhLMU1LTVRoVk5VRk9NVVV2VDB4dFN6VkJORW95YXpGTFQyWm1UbVUwVFhwcmNIUmxWMlJPU1VKNVEzVm9PRlpYVkN0dGRuZDRhWGh3Y21Ka2FUWlVla3hUUkFwMWFqQkVlbnB1VVc1V1pGQkRjeXMwUlhaSllqRXJZbk5QV0hoSWFFc3dlWGRoWTJ4amNHUlVjRVpqV0dkdFIwOVhUek5SU2xGSFVEVTFUMlZHTDJneENrdFBRVWhwUTA5RU5rcEZZbkp0WTFkbVNXWk9hRTQ1VmxGWVZXeHNWa2h2UVRkWlp6aDJabU14UVVaV1prYzVaVE5UUXpScVF6WjViVFEyUjNCRFdVOEtNVkV5TmxOWVJHRnphVGRaUVhGVGNFbDVZMjF4Y2xac2QyZERRVkZzTkZFek9YZHRVbWRGVW5CV2RsQlhiMDF6YkV0RlVGazVWbXAwZVhvNFNWSnNaQXB1VmsxWGJGWTRZVkpqV2tvdlZFNUhZbTQzYkVkdFdHOWxWalpTVVhsalpVRkJkR1JQVHpkdlprWktjVWRqYkRJNVYxcDJiellyY2xGUWRYbEZObVYzQ25acWRsTlNTa2RUU0V4ME9HUXZVMlpRTWtFMk5rVXdiMk4yU2xNMWFXcEJlV05OUldOVU9HOXJSVnB5TVVWdWJTOUljbG8zTDBKelJtSTBia2wzYXprS2NVODBZMDAxVXpnNU0wOTNVMVZNYjA1RllXRnphbGgyTlcxWWFYUnZMelZKTVc1UGNWcGpNR2x6YUVOTk5XdEhOMHRtTURadGVHczVZelIwYVVJek1RcHBXRXRGVUdkWlpqVmxOVkpxZVVGTENpMHRMUzB0UlU1RUlFTkZVbFJKUmtsRFFWUkZMUzB0TFMwSwogICAgc2VydmVyOiBodHRwczovL29wcy00NDAyM2I2Yy5oY3Aud2VzdGV1cm9wZS5hem1rOHMuaW86NDQzCiAgbmFtZTogb3BzCmNvbnRleHRzOgotIGNvbnRleHQ6CiAgICBjbHVzdGVyOiBvcHMKICAgIHVzZXI6IGdhcmRlbmVyLWZsZWV0LWFnZW50CiAgbmFtZTogb3BzCmN1cnJlbnQtY29udGV4dDogb3BzCmtpbmQ6IENvbmZpZwpwcmVmZXJlbmNlczoge30KdXNlcnM6Ci0gbmFtZTogZ2FyZGVuZXItZmxlZXQtYWdlb
nQKICB1c2VyOgogICAgdG9rZW46IGV5SmhiR2NpT2lKU1V6STFOaUlzSW10cFpDSTZJbGxqYkRSM2VtVkdZMFpEZUdaRmVIY3hhRGxHWjAxYVFteFpURE5qTVRoVGFXMU1ibmxmTm1rNWRVa2lmUS5leUpwYzNNaU9pSnJkV0psY201bGRHVnpMM05sY25acFkyVmhZMk52ZFc1MElpd2lhM1ZpWlhKdVpYUmxjeTVwYnk5elpYSjJhV05sWVdOamIzVnVkQzl1WVcxbGMzQmhZMlVpT2lKamJIVnpkR1Z5Y3lJc0ltdDFZbVZ5Ym1WMFpYTXVhVzh2YzJWeWRtbGpaV0ZqWTI5MWJuUXZjMlZqY21WMExtNWhiV1VpT2lKbllYSmtaVzVsY2kxbWJHVmxkQzFoWjJWdWRDMTBiMnRsYmkxNmRtNW9jeUlzSW10MVltVnlibVYwWlhNdWFXOHZjMlZ5ZG1salpXRmpZMjkxYm5RdmMyVnlkbWxqWlMxaFkyTnZkVzUwTG01aGJXVWlPaUpuWVhKa1pXNWxjaTFtYkdWbGRDMWhaMlZ1ZENJc0ltdDFZbVZ5Ym1WMFpYTXVhVzh2YzJWeWRtbGpaV0ZqWTI5MWJuUXZjMlZ5ZG1salpTMWhZMk52ZFc1MExuVnBaQ0k2SWpNME9HUTVOVGRtTFRCaVpETXRORFU0T0MxaU5EUm1MV1l5T1RkaU1EWXhZV1JqTXlJc0luTjFZaUk2SW5ONWMzUmxiVHB6WlhKMmFXTmxZV05qYjNWdWREcGpiSFZ6ZEdWeWN6cG5ZWEprWlc1bGNpMW1iR1ZsZEMxaFoyVnVkQ0o5Lm1OR1hHekN5MFhBTTZMWTlSTEJhTTZral9RSHBGdVVhcU54WklZT3dldm1VSEFwMTF6NEVLMThOZGtJWDRlZEhHT3hkbHlVWUFfUWg1V3dfalBndGJpOEk2NmNickRWX2oxZGtlVlp4SWN4VVc5X3BhN0NEVUpveDF6dUNienJMRjNfV0JFZFVkU1JnWUtVaURjUXZWcm9mUWVQOVQtdVRFUWdkYkpLenBEcThZd0hQY0VjdmJ5a3QtYThUMWNNX1dCMHlta0dRLXhCYUJKRHp1ZEFvNVMwUVQ4NHZrOV8xNFlEb1FtUkZ2MkxnRlNRQUhqcTdDYnM2QXJjWm9QS3Q0aUVxRVJIRGZGcDdmQWozVFQ1QmFVN0MxV2NfazRYLVJYdFJzWnY4Uml0UDVPUUszdWRyaEtGWi1GdVhIaXN0R19NcXV2VlZrWkhiOUdTZldzblpuRnJnaWNTbDJSVVIxWDN0dlJIODljYzk0a3oyNGZ0cG13Q3BCNlNRSDRzZWxMRnBsb0tpWFMtcnR3cGU4SXFkS2dDTEFZWm5CMG1fS1JGWVE1cEFOV2hFeWF4MkVsTzRaSDN0eFU4TWJSMlJnWm1hWXJLZzF5YkQ5MnJ1VnlrR01fSUlhY2NLc1gtZjdjQ1RZTnBCZm1IaHAzVUhMQmJKVENHZU42ODlxLWVqTEhvcFpaNmxDLTdPMkpBMWRCM3hOenVQbDg1SHJxclIxZEZ4Sk84bjFIaEQwdWdZeHYyRXZVMlhYMkVuVFNGcFd4TlF5dlhFcjVOY1JiRXdfTk1vTUh2b1p2V21jYzg3M2J4TWJvR1p0dWU5ZmZSVkJnMmFSWlU5X1I1bVlEUkZiRWQ5VWxRRHZuS0RuNlJ4LVNBLW5nNVZFTXJzUG1keDAtem01WnZReUNrCgo=
+ labels:
+
+disableControllers: []
+
+# imageVectorOverwrite: |
+# images:
+# - name: fleet-management
+# sourceRepository: github.com/gardener/fleet-management
+# repository: eu.gcr.io/gardener-project/fleet-controller-manager
+# tag: "0.2.9"
diff --git a/cmd/gardener-extension-shoot-fleet-agent/app/app.go b/cmd/gardener-extension-shoot-fleet-agent/app/app.go
new file mode 100644
index 0000000..080cada
--- /dev/null
+++ b/cmd/gardener-extension-shoot-fleet-agent/app/app.go
@@ -0,0 +1,95 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package app
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller"
+ "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller/healthcheck"
+
+ extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller"
+ "github.com/gardener/gardener/extensions/pkg/util"
+
+ "github.com/spf13/cobra"
+ corev1 "k8s.io/api/core/v1"
+ componentbaseconfig "k8s.io/component-base/config"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+// NewServiceControllerCommand creates a new command that is used to start the Fleet Service controller.
+func NewServiceControllerCommand() *cobra.Command {
+ options := NewOptions()
+
+ cmd := &cobra.Command{
+ Use: "shoot-fleet-agent-controller-manager",
+		Short:         "Shoot Fleet Service Controller manages components which register Shoot clusters in Fleet.",
+ SilenceErrors: true,
+
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if err := options.optionAggregator.Complete(); err != nil {
+ return fmt.Errorf("error completing options: %s", err)
+ }
+ cmd.SilenceUsage = true
+ return options.run(cmd.Context())
+ },
+ }
+
+ options.optionAggregator.AddFlags(cmd.Flags())
+
+ return cmd
+}
+
+func (o *Options) run(ctx context.Context) error {
+ // TODO: Make these flags configurable via command line parameters or component config file.
+ util.ApplyClientConnectionConfigurationToRESTConfig(&componentbaseconfig.ClientConnectionConfiguration{
+ QPS: 100.0,
+ Burst: 130,
+ }, o.restOptions.Completed().Config)
+
+ mgrOpts := o.managerOptions.Completed().Options()
+
+ mgrOpts.ClientDisableCacheFor = []client.Object{
+ &corev1.Secret{}, // applied for ManagedResources
+ &corev1.ConfigMap{}, // applied for monitoring config
+ }
+
+ mgr, err := manager.New(o.restOptions.Completed().Config, mgrOpts)
+ if err != nil {
+ return fmt.Errorf("could not instantiate controller-manager: %s", err)
+ }
+
+ if err := extensionscontroller.AddToScheme(mgr.GetScheme()); err != nil {
+ return fmt.Errorf("could not update manager scheme: %s", err)
+ }
+
+ ctrlConfig := o.fleetOptions.Completed()
+ ctrlConfig.Apply(&controller.DefaultAddOptions.ServiceConfig)
+ o.controllerOptions.Completed().Apply(&controller.DefaultAddOptions.ControllerOptions)
+ o.healthOptions.Completed().Apply(&healthcheck.DefaultAddOptions.Controller)
+ o.reconcileOptions.Completed().Apply(&controller.DefaultAddOptions.IgnoreOperationAnnotation)
+
+ if err := o.controllerSwitches.Completed().AddToManager(mgr); err != nil {
+ return fmt.Errorf("could not add controllers to manager: %s", err)
+ }
+
+ if err := mgr.Start(ctx); err != nil {
+ return fmt.Errorf("error running manager: %s", err)
+ }
+
+ return nil
+}
diff --git a/cmd/gardener-extension-shoot-fleet-agent/app/options.go b/cmd/gardener-extension-shoot-fleet-agent/app/options.go
new file mode 100644
index 0000000..7463b15
--- /dev/null
+++ b/cmd/gardener-extension-shoot-fleet-agent/app/options.go
@@ -0,0 +1,74 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package app
+
+import (
+ "os"
+
+ fleetagentservicecmd "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/cmd"
+
+ controllercmd "github.com/gardener/gardener/extensions/pkg/controller/cmd"
+)
+
+// ExtensionName is the name of the extension.
+const ExtensionName = "extension-shoot-fleet-agent"
+
+// Options holds configuration passed to the Fleet Service controller.
+type Options struct {
+ fleetOptions *fleetagentservicecmd.FleetServiceOptions
+ restOptions *controllercmd.RESTOptions
+ managerOptions *controllercmd.ManagerOptions
+ controllerOptions *controllercmd.ControllerOptions
+ healthOptions *controllercmd.ControllerOptions
+ controllerSwitches *controllercmd.SwitchOptions
+ reconcileOptions *controllercmd.ReconcilerOptions
+ optionAggregator controllercmd.OptionAggregator
+}
+
+// NewOptions creates a new Options instance.
+func NewOptions() *Options {
+ options := &Options{
+ fleetOptions: &fleetagentservicecmd.FleetServiceOptions{},
+ restOptions: &controllercmd.RESTOptions{},
+ managerOptions: &controllercmd.ManagerOptions{
+ // These are default values.
+ LeaderElection: true,
+ LeaderElectionID: controllercmd.LeaderElectionNameID(ExtensionName),
+ LeaderElectionNamespace: os.Getenv("LEADER_ELECTION_NAMESPACE"),
+ },
+ controllerOptions: &controllercmd.ControllerOptions{
+ // This is a default value.
+ MaxConcurrentReconciles: 5,
+ },
+ healthOptions: &controllercmd.ControllerOptions{
+ // This is a default value.
+ MaxConcurrentReconciles: 5,
+ },
+ controllerSwitches: fleetagentservicecmd.ControllerSwitches(),
+ reconcileOptions: &controllercmd.ReconcilerOptions{},
+ }
+
+ options.optionAggregator = controllercmd.NewOptionAggregator(
+ options.restOptions,
+ options.managerOptions,
+ options.controllerOptions,
+ options.fleetOptions,
+ controllercmd.PrefixOption("healthcheck-", options.healthOptions),
+ options.controllerSwitches,
+ options.reconcileOptions,
+ )
+
+ return options
+}
diff --git a/cmd/gardener-extension-shoot-fleet-agent/main.go b/cmd/gardener-extension-shoot-fleet-agent/main.go
new file mode 100644
index 0000000..acec3cc
--- /dev/null
+++ b/cmd/gardener-extension-shoot-fleet-agent/main.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "github.com/javamachr/gardener-extension-shoot-fleet-agent/cmd/gardener-extension-shoot-fleet-agent/app"
+
+ controllercmd "github.com/gardener/gardener/extensions/pkg/controller/cmd"
+ "github.com/gardener/gardener/extensions/pkg/log"
+ runtimelog "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/manager/signals"
+)
+
+func main() {
+ runtimelog.SetLogger(log.ZapLogger(false))
+
+ ctx := signals.SetupSignalHandler()
+ if err := app.NewServiceControllerCommand().ExecuteContext(ctx); err != nil {
+ controllercmd.LogErrAndExit(err, "error executing the main controller command")
+ }
+}
diff --git a/docs/installation/setup.md b/docs/installation/setup.md
new file mode 100644
index 0000000..a594977
--- /dev/null
+++ b/docs/installation/setup.md
@@ -0,0 +1,79 @@
+# Gardener Fleet agent management
+
+## Introduction
+Gardener comes with an extension that enables shoot owners to register their cluster in Fleet.
+
+## Extension Installation
+The `shoot-fleet-agent` extension can be deployed and configured via Gardener's native resource [ControllerRegistration](https://github.com/gardener/gardener/blob/master/docs/extensions/controllerregistration.md).
+
+### Prerequisites
+To let the `shoot-fleet-agent` operate properly, you need:
+- a working cluster with the Fleet multi-cluster setup enabled
+- a kubeconfig with read/write access to `cluster.fleet.cattle.io` and Secret resources in some namespace of the Fleet manager cluster (see the sketch below)
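+
+As a rough orientation, a Role in the Fleet manager cluster granting this access could look like the following sketch (the `fleet-agent-registrar` name is only a placeholder, and the `clusters` namespace merely mirrors the one used in `example/00-config.yaml`):
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: fleet-agent-registrar  # placeholder name
+  namespace: clusters          # namespace used for cluster registrations
+rules:
+- apiGroups: ["fleet.cattle.io"]
+  resources: ["clusters"]
+  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+- apiGroups: [""]
+  resources: ["secrets"]
+  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+```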
+
+### ControllerRegistration
+An example of a `ControllerRegistration` for the `shoot-fleet-agent` can be found here: https://github.com/javamachr/gardener-extension-shoot-fleet-agent/blob/master/example/controller-registration.yaml
+
+### Configuration
+The `ControllerRegistration` contains a Helm chart which eventually deploys the `shoot-fleet-agent` to seed clusters.
+
+```yaml
+apiVersion: core.gardener.cloud/v1beta1
+kind: ControllerRegistration
+...
+ values:
+ clientConnection:
+ kubeconfig: abcd
+ labels:
+```
+
+If the `shoot-fleet-agent` should be enabled for every shoot cluster in your Gardener-managed environment, enable it globally in the `ControllerRegistration`:
+```yaml
+apiVersion: core.gardener.cloud/v1beta1
+kind: ControllerRegistration
+...
+ resources:
+ - globallyEnabled: true
+ kind: Extension
+ type: shoot-fleet-agent
+```
+
+Alternatively, you can enable the extension only for certain shoots:
+```yaml
+kind: Shoot
+apiVersion: core.gardener.cloud/v1beta1
+...
+spec:
+ extensions:
+ - type: shoot-fleet-agent
+...
+```
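+
+If the extension is globally enabled, individual shoots can usually still opt out of it; a minimal sketch, assuming your Gardener version supports the `disabled` flag on shoot extensions:
+```yaml
+kind: Shoot
+apiVersion: core.gardener.cloud/v1beta1
+...
+spec:
+  extensions:
+  - type: shoot-fleet-agent
+    disabled: true
+...
+```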
+
+
diff --git a/docs/usage/register_cluster.md b/docs/usage/register_cluster.md
new file mode 100644
index 0000000..c2a2987
--- /dev/null
+++ b/docs/usage/register_cluster.md
@@ -0,0 +1,51 @@
+# Register Shoot cluster in Fleet manager
+
+## Introduction
+Gardener takes care of provisioning clusters; it does not install anything into the created clusters.
+This extension enables app installation via [Fleet](https://fleet.rancher.io) by registering newly created Shoot clusters with the Fleet manager.
+
+### Service Scope
+This service enables users to register a Shoot cluster in Fleet:
+```yaml
+kind: Shoot
+...
+spec:
+ extensions:
+ - type: shoot-fleet-agent
+ providerConfig:
+ apiVersion: service.fleet-agent.extensions.gardener.cloud/v1alpha1
+      kind: FleetAgentConfig # assumed kind, mirroring example/00-config.yaml
+ clientConnection:
+ kubeconfig: base64 encoded kubeconfig
+ labels:
+ env: test
+```
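+
+For comparison, the controller-level configuration of the agent (see `example/00-config.yaml` in this repository) has the following shape; it holds the kubeconfig used to reach the Fleet manager and the namespace in which clusters are registered:
+```yaml
+apiVersion: shoot-fleet-agent-service.extensions.config.gardener.cloud/v1alpha1
+kind: FleetAgentConfig
+clientConnection:
+  kubeconfig: # base64-encoded kubeconfig pointing to the Fleet manager cluster
+namespace: clusters # namespace in which clusters are registered in the Fleet manager cluster
+```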
+
+
diff --git a/example/00-config.yaml b/example/00-config.yaml
new file mode 100644
index 0000000..44c54f4
--- /dev/null
+++ b/example/00-config.yaml
@@ -0,0 +1,5 @@
+apiVersion: shoot-fleet-agent-service.extensions.config.gardener.cloud/v1alpha1
+kind: FleetAgentConfig
+clientConnection:
+ kubeconfig: #base64 encoded kubeconfig
+namespace: clusters #namespace to register clusters in fleet manager cluster
\ No newline at end of file
diff --git a/example/10-fake-shoot-controlplane.yaml b/example/10-fake-shoot-controlplane.yaml
new file mode 100644
index 0000000..9cb95a2
--- /dev/null
+++ b/example/10-fake-shoot-controlplane.yaml
@@ -0,0 +1,186 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: shoot--foo--bar
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: garden
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: etcd
+ namespace: shoot--foo--bar
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ run: etcd
+ template:
+ metadata:
+ labels:
+ run: etcd
+ spec:
+ containers:
+ - image: quay.io/coreos/etcd:v3.3.12
+ name: etcd
+ command:
+ - etcd
+ - -advertise-client-urls=http://0.0.0.0:2379
+ - -listen-client-urls=http://0.0.0.0:2379
+ - -data-dir=/etcd-data
+ volumeMounts:
+ - mountPath: /etcd-data
+ name: data
+ volumes:
+ - name: data
+ emptyDir: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: etcd
+ namespace: shoot--foo--bar
+spec:
+ ports:
+ - port: 2379
+ selector:
+ run: etcd
+ type: ClusterIP
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: ca
+ namespace: shoot--foo--bar
+type: Opaque
+data:
+ ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMrakNDQWVLZ0F3SUJBZ0lVVHAzWHZocldPVk04WkdlODZZb1hNVi9VSjdBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZURVRNQkVHQTFVRUF4TUthM1ZpWlhKdVpYUmxjekFlRncweE9UQXlNamN4TlRNME1EQmFGdzB5TkRBeQpNall4TlRNME1EQmFNQlV4RXpBUkJnTlZCQU1UQ210MVltVnlibVYwWlhNd2dnRWlNQTBHQ1NxR1NJYjNEUUVCCkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDeWkwUUdPY3YyYlRmM044T0xOOTdSd3NnSDZRQXI4d1NwQU9ydHRCSmcKRm5mblUyVDFSSGd4bTdxZDE5MFdMOERDaHYwZFpmNzZkNmVTUTRacmpqeUFyVHp1ZmI0RHRQd2crVldxN1h2RgpCTnluKzJoZjRTeVNrd2Q2azdYTGhVVFJ4MDQ4SWJCeUM0ditGRXZtb0xBd3JjMGQwRzE0ZWM2c25EKzdqTzdlCmt5a1EvTmdBT0w3UDZrRHM5ejYrYk9mZ0YwbkdOK2JtZVdRcUplalIwdCtPeVFEQ3g1L0ZNdFVmRVZSNVFYODAKYWVlZmdwM0pGWmI2ZkF3OUtoTHRkUlYzRlAwdHo2aFMrZTRTZzBtd0FBT3FpalpzVjg3a1A1R1l6anRjZkExMgpsRFlsL25iMUd0VnZ2a1FENDlWblY3bURubDZtRzNMQ01OQ05INldsWk52M0FnTUJBQUdqUWpCQU1BNEdBMVVkCkR3RUIvd1FFQXdJQkJqQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU0ZBM0x2Sk0yMWQ4cXMKWlZWQ2U2UnJUVDl3aVRBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQW5zL0VKM3lLc2p0SVNvdGVRNzE0cjJVbQpCTVB5VVlUVGRSSEQ4TFpNZDNSeWt2c2FjRjJsMnk4OE56NndKY0F1b1VqMWg4YUJEUDVvWFZ0Tm1GVDlqeWJTClRYclJ2V2krYWVZZGI1NTZuRUE1L2E5NGUrY2IrQ2szcXkvMXhnUW9TNDU3QVpRT0Rpc0RaTkJZV2tBRnMyTGMKdWNwY0F0WEp0SXRoVm03RmpvQUhZY3NyWTA0eUFpWUVKTEQwMlRqVURYZzRpR09HTWtWSGRtaGF3QkRCRjNBagplc2ZjcUZ3amk2SnlBS0ZSQUNQb3d5a1FPTkZ3VVNvbTg5dVlFU1NDSkZ2TkNrOU1KbWpKMlB6RFV0NkN5cFI0CmVwRmRkMWZYTHd1d243ZnZQTW1KcUQzSHRMYWxYMUFabVBrK0JJOGV6ZkFpVmNWcW5USlFNWGxZUHBZZTlBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+ ca.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBc290RUJqbkw5bTAzOXpmRGl6ZmUwY0xJQitrQUsvTUVxUURxN2JRU1lCWjM1MU5rCjlVUjRNWnU2bmRmZEZpL0F3b2I5SFdYKytuZW5ra09HYTQ0OGdLMDg3bjIrQTdUOElQbFZxdTE3eFFUY3AvdG8KWCtFc2twTUhlcE8xeTRWRTBjZE9QQ0d3Y2d1TC9oUkw1cUN3TUszTkhkQnRlSG5Pckp3L3U0enUzcE1wRVB6WQpBRGkreitwQTdQYyt2bXpuNEJkSnhqZm01bmxrS2lYbzBkTGZqc2tBd3NlZnhUTFZIeEZVZVVGL05Hbm5uNEtkCnlSV1crbndNUFNvUzdYVVZkeFQ5TGMrb1V2bnVFb05Kc0FBRHFvbzJiRmZPNUQrUm1NNDdYSHdOZHBRMkpmNTIKOVJyVmI3NUVBK1BWWjFlNWc1NWVwaHR5d2pEUWpSK2xwV1RiOXdJREFRQUJBb0lCQURTSEZuZENiOGhMTDZqeQo1ZnZDYnpLRlBMWmZEV2JnczJGSlhOU2NJci9VUEdoNU4zMlZMRXRrQm81RG9NN1RNOEhIVnhaY0dKejFzUDR1CkVaRDVJc0cwdGZWd1Z3UGVMa05CTjd2MjdHLzFVem0wbEd1STRzVW9ybzJZZ1dha0NiQXlFOGxMSEE4aGFJbFMKelZYSHRxNUxvOG4rdFFZNXg5MHVodTJWcy8wVkRscXdNNzNhbDNrdThLRS9XMkxTK0xXMTkrYjV1UXQrZEg5ZQpjdDN5UnpHMXorUWNpT3JVN3dSNHMxTlViOGJ0eXNZNHpwNVo1bXNaMHBobXN1eWR0SVVESUpndjBYU2EzbHZsCkJ2M2M0MFhneGdHVkRHam1sS2dqbWM0TFNVVlA3Rk5wQlBZRFphL2gyTWNYeDZYYUlyd1huVVdNaUFLdi9IK0kKazdHMjRBa0NnWUVBNXBSbEJzTHVhaUsyekxpQ2k1UFNXU0xBZVZ3K1BMSGJZS2tELzBzOFhLQjEyUzI3Tk00cwpQdUpFWFMzZnFqKzl1aFhiM0VtUmxycEthVldRWDc1b1RJaG0yZDN0ZDNEbHNjOUZUcDZSdkZweU5ZUWRXZTJjClNFUE42UnF5VTlQTVBuYlhoaWp5U2ZNNGRQeGRCdUxlRG1DZWNvaDl1MzJ2bmczT3ZONXdOOVVDZ1lFQXhqcEgKV2VBbFFaTFFQNDdPNWNiejVNRVowc0JqbDA4Y3RMdUFFbU5aRnJVcWR3VjlESWdpc210OHlYWHowSWVRdGdkQwpxUWFoekdCU1lkc3JQZGtTS1JCYkw3eEdMSVhGVkNvOFhNYi83ZEJGdTI5NzBqeStBWnlXMGdycFg4UmJYU1VoCk9mZm5BeEpQY1U0Nzg5OFlpWUNTMkNUNWJaMkszRlY2YktncjFwc0NnWUVBM3ZMTlhHUlBNc0N1RC9TNEJVM0IKTGY2MExLUk1hVk52MDE2WlJ0ZndYWCtwYzMwTWJscUwzYUhhaUY0Nnpkc2tFREhpakhWMkdFKzRjM0VRVUFORAp4Zng5dGxzbnFUMjRXdDBYSHBXa1JJTVB1ejhyUWpERjAvbjd0MURnN0x6MTE5QUJSTytDbG81ZUlIK0RVNDA1Ck9KMmpsd3J5eDc4WGQ3UFNHanphTktFQ2dZQldxbkk1bENzVndVZDFFazNZM2lRUjFtOGcybVp3Wi9GSC8xWWUKTS90bVZ5ekt2c2FPYlJLbWFTSTB2bklyc0ttUFBCdGo1UGRtY0pKMElDdUdyZG9udy9QcUtlbVFXNmdMaFMvQQo3R3hHb0RGc29uQkRXYlZFNVI1M2xMZTEzQmFKNGNybUdrR1E4VGQwZFZ6MjRZcEx5Ny9uNmpwM04xTFh4RE56ClFBOXlrd0tCZ1FEUm8yR2VTeHRsUHJYNUlIakRWeTRBLzFqc3lBSTZHdEhqVXdVY3J6NVAzV1JKL0FQK1UzUisKYkUzMmxjV3A5OEFmUkZyYjFJNkYybVJXcHB2VjlFV2R0SnpjYzEyeUR1bUlUalVJdHhvMkxiY0d3SklGN1ZYZwpFODNuK0IrNmZ1WEl6Q1IyeTUrQjVyTTZPYnl1Z1NZUndGWG91dW9SeXhUL3VXRWl3U0J4ZXc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: kube-apiserver
+ namespace: shoot--foo--bar
+type: Opaque
+data:
+ kube-apiserver.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVEakNDQXZhZ0F3SUJBZ0lVSjQzbHJublpXSmYyTEV2OWMxVDVoaDAzb1Fvd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZURVRNQkVHQTFVRUF4TUthM1ZpWlhKdVpYUmxjekFlRncweE9UQTBNekF4TVRVNU1EQmFGdzB5T1RBMApNamN4TVRVNU1EQmFNQnN4R1RBWEJnTlZCQU1URUhONWMzUmxiVHBoY0dselpYSjJaWEl3Z2dFaU1BMEdDU3FHClNJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURFTjVKUUhsODJKeTdWeDcvT3VDOWl6dFhPZG1CSUV6Q0sKY3VCa0RxbVZhY3ZLUi90VjhZellCb2lVZCtVR0dKWG5TSFVjT1ArR200a0tPQkxOMDNpV3Fkakx2amU5d0tiZgp2ZVoxKzhaSFdDdUtTWXEzZE5BdXdKSzZqdnl3dDErOHhZUS9uYzVGYWkvR0owVE92NFE5YUlZd0VUV0t5eVFyCm4vU2NyY1M5NXJQTFhueFQvbVlxZCttczhBc0RQOUNDRWZoQnRCejN4ZmJ3RHk4L1BkSnp5dC9mOVVqR3JhcjMKbU1sbzdUL1V1VTcxYk1TUU1tNWtHZXNFSXU1RkozL2t5NTlpamdScW5FQ0JUbkZSeENzamh3anRQSkYveHFZYwpQdlJIT3RweU5nTUVsRmZqRlh5ZkU3Z1UzaGJScGRRRmtYeEpGT2V6eGdyMzQrRTN0SkFCQWdNQkFBR2pnZ0ZPCk1JSUJTakFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0V3REFZRFZSMFQKQVFIL0JBSXdBREFkQmdOVkhRNEVGZ1FVMktlb2lxUEJQN29aRmEyVFJqcTFzU25xdkkwd0h3WURWUjBqQkJndwpGb0FVaFFOeTd5VE50WGZLckdWVlFudWthMDAvY0lrd2dkUUdBMVVkRVFTQnpEQ0J5WUlPYTNWaVpTMWhjR2x6ClpYSjJaWEtDSG10MVltVXRZWEJwYzJWeWRtVnlMbk5vYjI5MExTMW1iMjh0TFdKaGNvSWlhM1ZpWlMxaGNHbHoKWlhKMlpYSXVjMmh2YjNRdExXWnZieTB0WW1GeUxuTjJZNElLYTNWaVpYSnVaWFJsYzRJU2EzVmlaWEp1WlhSbApjeTVrWldaaGRXeDBnaFpyZFdKbGNtNWxkR1Z6TG1SbFptRjFiSFF1YzNaamdpUnJkV0psY201bGRHVnpMbVJsClptRjFiSFF1YzNaakxtTnNkWE4wWlhJdWJHOWpZV3lDQ1d4dlkyRnNhRzl6ZEljRWZ3QUFBWWNFWkVBQUFUQU4KQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBcENGSmkxaWx2SDZEc1VuSmxRTDhOcEttN0dXejJoSWlPd2hqQzZHeQp0TjVhWGNDdXkyc0RvRFB2R1dHd3NEZjhHL1JyTGIwNEhJU0JUbUhTUGJvNHZQSnQvcXBacU10bGtLOWFOQ3NlCmtEU3E5UTM5a0hFSmtNcmgrYnpJMUtVa1gycEZoZ0g0bG5qZzA5TVc1KzdMTVBJRXdSVFpjanc5WjJuM0QxTVYKeUIveFpTR1h3V0JSbVg4cjdJcEVmYzNQK0Rmc015MUdEN2c5S2p0cVRIUkg5Tml3cUhoSFpOMkZYZ2w1S2JWYQpjcmwrQmxjemZtbUNubEFOMmpONU85YzV1dHhLRDVOVEdVS1R2b2J0Rkt0VE55dUV0THk2WUZBd2ZGSEx3aEZaCkdURGxJdWpPNVlyK2g0bHpqQ1ZHSCtZdWZSYzRSbVRXMHNadTZseVRiRjRGdnc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+ kube-apiserver.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBeERlU1VCNWZOaWN1MWNlL3pyZ3ZZczdWem5aZ1NCTXdpbkxnWkE2cGxXbkx5a2Y3ClZmR00yQWFJbEhmbEJoaVY1MGgxSERqL2hwdUpDamdTemRONGxxbll5NzQzdmNDbTM3M21kZnZHUjFncmlrbUsKdDNUUUxzQ1N1bzc4c0xkZnZNV0VQNTNPUldvdnhpZEV6citFUFdpR01CRTFpc3NrSzUvMG5LM0V2ZWF6eTE1OApVLzVtS25mcHJQQUxBei9RZ2hINFFiUWM5OFgyOEE4dlB6M1NjOHJmMy9WSXhxMnE5NWpKYU8wLzFMbE85V3pFCmtESnVaQm5yQkNMdVJTZC81TXVmWW80RWFweEFnVTV4VWNRckk0Y0k3VHlSZjhhbUhENzBSenJhY2pZREJKUlgKNHhWOG54TzRGTjRXMGFYVUJaRjhTUlRuczhZSzkrUGhON1NRQVFJREFRQUJBb0lCQUJsZGRiU1Z1SWt2bDRaYgpSQmhkQndNbTZjeSswTU9BZDQzdU84T3pnWWluSElrUnRSUHZIZDN2T2tpM0Z3d3FzWFlzajdjT1J6b0hjVGU0CjkvVlRtUXNnK2IyYzRXZk9OOFJFc0Q0Z1JnbURCRjNrRStLVFh6WXIvc3ZQSUN0WUNUQkYrRXFoQThGRmNOZVUKeS9oT0diSTJpT0k5MTBZUjdLTVhFbkFPcHBoRnVCYzhJbXRrT2w2MWZlRG15ZnRzTnZ1N25FSXRTeldDZy9XawpoUHVWYTZWMGxETEdjK2JRQi82bGVoK1FvNWxhRmRBSnhjT25VOUYzcWtMTVUrbnZ5YUh5OHR4L25VbitJK3pECnV1VGdJQngzUC91cGVSYi8rQUZvSGN4L3hRZDc5V3h1UXdyYlYrL3RmMzN4SE9VQXNYMTYvYWd2eHY2TzhpdmUKRXBKN3JJRUNnWUVBeEtseHdnYWVwaWo1b0tKVzJJTVE5emFVMEs0Ykh5QTVKV3l1cThQd1BQWmpMalhWNFE4aQpidHlOWW1VK2xMMDZxUDUrdWdBUDduamVoRHM0NktJMUlVa1Nvamx2WUhUTSswUHQ0WUZiVHlMYVh4QW5qRXVyCmtHeXBnUmdWRi9ZSm1zVms3bDIrakF5U2NFeE9nNDFudmJRMzh5L0NMNEZjT3pmUkdEVTRINDBDZ1lFQS8ydkUKeW9OU2hhaUp3QVpaRExrTHVXTVp6UDNxUzFyaDNaaGt5UmxUcHBENm50UWEyaVEwK0hXSDM3c1QzRmlYa28rNApjU015OUN1cnNsL05EV3l2aXNzMDJ0bkJScG5ZaFBoK0VHWnY5ZmkzYUVQR3k2ZmR0cVEzNituWG9kOS9hcWNGClhhUlVyZjdoZ212MUJQQ3NtMVFkZldaRjdxTmQ4Y0FRNWdCZEMwVUNnWUJuZTFYZHA4Z1JYTnhGdDBhRmRTb1gKTzBSQkxtd2RDOEttTzdNSnRQZVR6SDVSMFlneWZkazdocGhxM3lWMzlMNktNQ2dVelhXVW9VdE5QekJwMFBpdQpCQnBtL0Z2cjRHb0FDVFdDQktROFZ2V2JNZy90VmQvNEJnV0haVm1zR3czZ3Y5K2xRZlRERXRaM2V0K1JmM1hJCkw2MkZMR3M4dmcvL0pSYkVhelNWL1FLQmdEOWUrYUJWSExCVXRIaVVHcHZ4ZlZzdG4wVWc2blJ2eEFJNndTYXoKeGZGWm54U3hBMGlFR3pCWjJMQkZ0aFBCVnpuaHRwMDdZblQ2TU1zOHdaOUhDL2FmbkNtQVJWZkM2OWx4M2JVcgo5VE5OMWhOa1hYcEIxOXhzdWdNcUxYblgvY0QwVjN4NFBSbytWcTBKSSsxcDJTbGdvVUJ2azRJMXZpVXd4Z0NLCnJvc1JBb0dBVkVDODI4YlRXYk1xVzlBN2tCVWJzQWpMdHJZNVZhNjFjeGg5M2pOeHpJbnlHS2JEMVF5ZEFRSXMKNDlrcjZUSURaZUpGY2M5ZjFaQU5RRjZSTXRLL1g2QzFjRjl5TEJTWXNTL1d5VTg1dzhtbFN3SmNqcCtnWWNYVApZaE9vdHloM0dwbnlnSGQycUw3M1pIVmMrRFFVZWdJTkdSbGhUM2YxWjFiQmxpd0VyU2s9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: gardener
+ namespace: shoot--foo--bar
+type: Opaque
+data:
+ ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMrakNDQWVLZ0F3SUJBZ0lVVHAzWHZocldPVk04WkdlODZZb1hNVi9VSjdBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZURVRNQkVHQTFVRUF4TUthM1ZpWlhKdVpYUmxjekFlRncweE9UQXlNamN4TlRNME1EQmFGdzB5TkRBeQpNall4TlRNME1EQmFNQlV4RXpBUkJnTlZCQU1UQ210MVltVnlibVYwWlhNd2dnRWlNQTBHQ1NxR1NJYjNEUUVCCkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDeWkwUUdPY3YyYlRmM044T0xOOTdSd3NnSDZRQXI4d1NwQU9ydHRCSmcKRm5mblUyVDFSSGd4bTdxZDE5MFdMOERDaHYwZFpmNzZkNmVTUTRacmpqeUFyVHp1ZmI0RHRQd2crVldxN1h2RgpCTnluKzJoZjRTeVNrd2Q2azdYTGhVVFJ4MDQ4SWJCeUM0ditGRXZtb0xBd3JjMGQwRzE0ZWM2c25EKzdqTzdlCmt5a1EvTmdBT0w3UDZrRHM5ejYrYk9mZ0YwbkdOK2JtZVdRcUplalIwdCtPeVFEQ3g1L0ZNdFVmRVZSNVFYODAKYWVlZmdwM0pGWmI2ZkF3OUtoTHRkUlYzRlAwdHo2aFMrZTRTZzBtd0FBT3FpalpzVjg3a1A1R1l6anRjZkExMgpsRFlsL25iMUd0VnZ2a1FENDlWblY3bURubDZtRzNMQ01OQ05INldsWk52M0FnTUJBQUdqUWpCQU1BNEdBMVVkCkR3RUIvd1FFQXdJQkJqQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU0ZBM0x2Sk0yMWQ4cXMKWlZWQ2U2UnJUVDl3aVRBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQW5zL0VKM3lLc2p0SVNvdGVRNzE0cjJVbQpCTVB5VVlUVGRSSEQ4TFpNZDNSeWt2c2FjRjJsMnk4OE56NndKY0F1b1VqMWg4YUJEUDVvWFZ0Tm1GVDlqeWJTClRYclJ2V2krYWVZZGI1NTZuRUE1L2E5NGUrY2IrQ2szcXkvMXhnUW9TNDU3QVpRT0Rpc0RaTkJZV2tBRnMyTGMKdWNwY0F0WEp0SXRoVm03RmpvQUhZY3NyWTA0eUFpWUVKTEQwMlRqVURYZzRpR09HTWtWSGRtaGF3QkRCRjNBagplc2ZjcUZ3amk2SnlBS0ZSQUNQb3d5a1FPTkZ3VVNvbTg5dVlFU1NDSkZ2TkNrOU1KbWpKMlB6RFV0NkN5cFI0CmVwRmRkMWZYTHd1d243ZnZQTW1KcUQzSHRMYWxYMUFabVBrK0JJOGV6ZkFpVmNWcW5USlFNWGxZUHBZZTlBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+ gardener.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURSRENDQWl5Z0F3SUJBZ0lVRWFQVkVoYXMyck5wMHRwQWVwbGs2UE00emZrd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZURVRNQkVHQTFVRUF4TUthM1ZpWlhKdVpYUmxjekFlRncweE9UQXlNamN4TlRNMU1EQmFGdzB5T1RBeQpNalF4TlRNMU1EQmFNQ3d4RnpBVkJnTlZCQW9URG5ONWMzUmxiVHB0WVhOMFpYSnpNUkV3RHdZRFZRUURFd2huCllYSmtaVzVsY2pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTUlJRTd3SVJ4amEKYlcrdldJTFF1QjZ6WkI0by9vU0NOaWp1UVZGQy9FYkhzREtJZjVNcHpnWlMrQ3FTbFBTYnM3UkNvL2JPVVhCcApLZE52c01lOW82RXRGSnZTd3MrTlFpY0d5TEsvN2FTZmhwQkYyL3R6amUzYU40MiswS2RaRG81TWNuWW1PYVY4CmtFeGhRbmpyQkVQd05hRW12MnRyTGljdG1XYjNBZjFKZmgrU0RkNkh6S1ZkS2FWYzd1R2NvOEtSL2xxRTBNUGkKRkI0clFabnJvNFMxMXVVSWx6ckc2VFdvZVNUMXlrWXlSazNxQnlLSG12NnBUaWhKa2ZWcXRLS0RWNzdTajZIOApkenIvdm1WMlpSYzhVSTh4d3Y0ZVgvbjQzcXpwUHNJNU14a0FHQ0tkSTVFb3lvSHNtTWNJckQ1d1NkZkNHd3F5Ckd2U3drNTBDaiswQ0F3RUFBYU4xTUhNd0RnWURWUjBQQVFIL0JBUURBZ1dnTUJNR0ExVWRKUVFNTUFvR0NDc0cKQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZPUk1UTnF1eWFQaERQY1ZiSDZxZlQ3eApBZE50TUI4R0ExVWRJd1FZTUJhQUZJVURjdThremJWM3lxeGxWVUo3cEd0TlAzQ0pNQTBHQ1NxR1NJYjNEUUVCCkN3VUFBNElCQVFBUXZGOFRlZFBlL2F1WWxNM2t0aW9sRlpIcHpSSG5QcHZjU3hOWU03ZE5IbjBFOWlPRFlYK1AKUTZBZ1lGcE5XRzN6V0dqZC9jQXJwVXhjb25CV2FwWmpCWHhKYS9oL0htSktpdHRDVmRGM2NVQ0lwK3VZdVYyWgpsR3l5TmM4ZHRVanFKZmd3cVkrQzZxZmFUVmQ3SlZRcjlZczZhTWl5TEZEclh3N1ZFWU5CWWZUUEwrSWRGUzVDCjIxSXNXY2w4bGJLa3RTYWxVTlF1NUNVTFVBT3FhdXJpRCt2QTl3TW8wZERtMGMrQTZLUEJNOVVzay9tNlFXcmwKWTRjb1cwT1ArZG9BelpMeUpTRDQ3UUdRWTVOZHB6Rm5lR2o0Y3pMSFgyMVJudkc0SFV2VlpjcTZGdmFPZitnRwpzRGc5emsrRzRoV2hrNGlZV0dnQnJTU3ZwR01XZzY0RQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+ gardener.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBd2dnVHZBaEhHTnB0YjY5WWd0QzRIck5rSGlqK2hJSTJLTzVCVVVMOFJzZXdNb2gvCmt5bk9CbEw0S3BLVTlKdXp0RUtqOXM1UmNHa3AwMit3eDcyam9TMFVtOUxDejQxQ0p3Yklzci90cEorR2tFWGIKKzNPTjdkbzNqYjdRcDFrT2preHlkaVk1cFh5UVRHRkNlT3NFUS9BMW9TYS9hMnN1SnkyWlp2Y0IvVWwrSDVJTgozb2ZNcFYwcHBWenU0Wnlqd3BIK1dvVFF3K0lVSGl0Qm1ldWpoTFhXNVFpWE9zYnBOYWg1SlBYS1JqSkdUZW9ICklvZWEvcWxPS0VtUjlXcTBvb05YdnRLUG9meDNPdisrWlhabEZ6eFFqekhDL2g1ZitmamVyT2srd2prekdRQVkKSXAwamtTaktnZXlZeHdpc1BuQkoxOEliQ3JJYTlMQ1RuUUtQN1FJREFRQUJBb0lCQVFDeVd0NVpEM3JiRzUxUQo5bXQwQkF2YkhLZEJHZ21ySUU1TW5ZV28vdHdLNisvSTQ3UHhRSkpET2Uwb3BRa0xPRHlkekV3UHlHTVA0M3N4CkFoQUw2d1FJV2ZvQnFtQ3NqSHBzaVUzZkVaR09xNXp6N1dOaTVqdG1raHBTenozWk5vR3N5QlRIQ2lnZk9Ec2sKR3BzUnIyYUNWTUhYV2xqR2t3VWZ1a2l5WHdlVEhHckFsVGlNdHErakN0N0NvK1VGMDR0VUVBMVZHajlOT2ZSbQpwNmp5OTVlVkpyWmtkVkZ2UUtSeDRnczJtcFg0bnBHaFhKUExQYk14RXExNzJyalZqbWpxRjZPTGU0YVBRd2g5CkN5cGdmUjBCM2M5enU0MlBnSTFtU1o1UHRPK2pabS9udmRnZkZIOHlRVnIwTll6bHVQS2VRb0dvTFpnMWI5dTYKdndWYnUwMjVBb0dCQU94MGYza1NMbUNJWGJ1T3h1bzNEWk1nQmFINXpCOEdqWVFJVmVnVHA4RUM4bWd2c2J1agppSFpjaFFQZjFjVExaMmhqTUYvK01VM2RVMXFpU2RpaEEyOG9hY3ozV283QjNrVW91ZU5kTDFwa251TURjMUMzClA5NzFrK25MTUNXZDdERXBSc2tJaUVXTXQ1Y2E4bFpGd1pRcno1aEFTZ1BVOGd3bStSaGh0SEdyQW9HQkFOSVIKNEpPVlNZVGpvSU53bjBpYjNyMGJvT0ZRcXlVa1Y0NWJmY3RNYzhxbngwYWx1Q0ZCVDBjdGhnYUVIdlRpWnZGQwowMVVHWFBSSEg0TXdiZmlDcnhoTE9hTFYwYkdaOSs4VTNkYVFOSDJSRjFtSVZOL0lYRG9GWUR5dlFrUm90OGFQCnI0VEhLWUIwNGVCSWlsS1NRRUEzbUFRN3l3RjdaQ1RqMm9PWGJwekhBb0dBSU5qT0hITjZIYzdUdG9wTzk1cE8KT0hIcVZtWHRCU0pUNWxPQ0c0c2ZjcUNHTEJMMERhelorQmRDSXhlbENvRjNDL2s3YnhwUW54QnYwOWRYaXRCVApPYzNUWkxXM3pyei9zM3ZFMXVETEF6T1hIdElMNHRxUjlOY0loU0hFdm5VQnFwS2hVcXZzd2p5YUJGT3A5bklhClFnc1QrNEp5eGJWL0tsRC9xWE5wTGFFQ2dZQThNLzltakZ3N2gzalM4bzNUbzBnY2JqU1hIaVZPU2JJR0RGWnEKMHdoRVJmMG9WQW9IRGM2SW1aVlZmTHZhZHFBRitKN1VPVFNlbFZ1RW4zYUV4LzhpT1R6VlcrM3l0aXVHQ3piZwpiUWQrRFB6aUhkNGxIQ2pDUUJRcWtCZXZ2MHNienNZQWlZdHRVeElBMHdsQlRNUzVJcldnVVBxRy9EUGhGcVBqCkhwMEd0UUtCZ1FDd2RqOE9SMkRROUJ4VlI5ejFzMEZvSG1rNDRQaDFtUEt0bVVmMWZoZEhDN2tsMlVURm5yVXIKd1hyWWZqVjRsSHVjaitvMklNUjBGVCs4QjJZS0FWTGNMZEZyaWJtY0cxZHNrRHR0bXBxdWpLY2t1OXAveWdmNApxd0Z5QWJjR0I2OGtRZVpJSXVYOTM3UzJRY3AxUExwVjl2Zm9JUURkRVVBM1JBTjBSRC80ZlE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
+ kubeconfig: LS0tCmFwaVZlcnNpb246IHYxCmtpbmQ6IENvbmZpZwpjdXJyZW50LWNvbnRleHQ6IHNtYWxsLWNsdXN0ZXIKY2x1c3RlcnM6Ci0gbmFtZTogc21hbGwtY2x1c3RlcgogIGNsdXN0ZXI6CiAgICBjZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YTogTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVTXJha05EUVdWTFowRjNTVUpCWjBsVlZIQXpXSFpvY2xkUFZrMDRXa2RsT0RaWmIxaE5WaTlWU2pkQmQwUlJXVXBMYjFwSmFIWmpUa0ZSUlV3S1FsRkJkMFpVUlZSTlFrVkhRVEZWUlVGNFRVdGhNMVpwV2xoS2RWcFlVbXhqZWtGbFJuY3dlRTlVUVhsTmFtTjRUbFJOTUUxRVFtRkdkekI1VGtSQmVRcE5hbGw0VGxSTk1FMUVRbUZOUWxWNFJYcEJVa0puVGxaQ1FVMVVRMjEwTVZsdFZubGliVll3V2xoTmQyZG5SV2xOUVRCSFExTnhSMU5KWWpORVVVVkNDa0ZSVlVGQk5FbENSSGRCZDJkblJVdEJiMGxDUVZGRGVXa3dVVWRQWTNZeVlsUm1NMDQ0VDB4T09UZFNkM05uU0RaUlFYSTRkMU53UVU5eWRIUkNTbWNLUm01bWJsVXlWREZTU0dkNGJUZHhaREU1TUZkTU9FUkRhSFl3WkZwbU56WmtObVZUVVRSYWNtcHFlVUZ5VkhwMVptSTBSSFJRZDJjclZsZHhOMWgyUmdwQ1RubHVLekpvWmpSVGVWTnJkMlEyYXpkWVRHaFZWRko0TURRNFNXSkNlVU0wZGl0R1JYWnRiMHhCZDNKak1HUXdSekUwWldNMmMyNUVLemRxVHpkbENtdDVhMUV2VG1kQlQwdzNVRFpyUkhNNWVqWXJZazltWjBZd2JrZE9LMkp0WlZkUmNVcGxhbEl3ZEN0UGVWRkVRM2cxTDBaTmRGVm1SVlpTTlZGWU9EQUtZV1ZsWm1kd00wcEdXbUkyWmtGM09VdG9USFJrVWxZelJsQXdkSG8yYUZNclpUUlRaekJ0ZDBGQlQzRnBhbHB6VmpnM2ExQTFSMWw2YW5SalprRXhNZ3BzUkZsc0wyNWlNVWQwVm5aMmExRkVORGxXYmxZM2JVUnViRFp0UnpOTVEwMU9RMDVJTmxkc1drNTJNMEZuVFVKQlFVZHFVV3BDUVUxQk5FZEJNVlZrQ2tSM1JVSXZkMUZGUVhkSlFrSnFRVkJDWjA1V1NGSk5Ra0ZtT0VWQ1ZFRkVRVkZJTDAxQ01FZEJNVlZrUkdkUlYwSkNVMFpCTTB4MlNrMHlNV1E0Y1hNS1dsWldRMlUyVW5KVVZEbDNhVlJCVGtKbmEzRm9hMmxIT1hjd1FrRlJjMFpCUVU5RFFWRkZRVzV6TDBWS00zbExjMnAwU1ZOdmRHVlJOekUwY2pKVmJRcENUVkI1VlZsVVZHUlNTRVE0VEZwTlpETlNlV3QyYzJGalJqSnNNbms0T0U1Nk5uZEtZMEYxYjFWcU1XZzRZVUpFVURWdldGWjBUbTFHVkRscWVXSlRDbFJZY2xKMlYya3JZV1ZaWkdJMU5UWnVSVUUxTDJFNU5HVXJZMklyUTJzemNYa3ZNWGhuVVc5VE5EVTNRVnBSVDBScGMwUmFUa0paVjJ0QlJuTXlUR01LZFdOd1kwRjBXRXAwU1hSb1ZtMDNSbXB2UVVoWlkzTnlXVEEwZVVGcFdVVktURVF3TWxScVZVUllaelJwUjA5SFRXdFdTR1J0YUdGM1FrUkNSak5CYWdwbGMyWmpjVVozYW1rMlNubEJTMFpTUVVOUWIzZDVhMUZQVGtaM1ZWTnZiVGc1ZFZsRlUxTkRTa1oyVGtOck9VMUtiV3BLTWxCNlJGVjBOa041Y0ZJMENtVndSbVJrTVdaWVRIZDFkMjQzWm5aUVRXMUtjVVF6U0hSTVlXeFlNVUZhYlZCckswSkpPR1Y2WmtGcFZtTldjVzVVU2xGTldHeFpVSEJaWlRsQlBUMEtMUzB0TFMxRlRrUWdRMFZTVkVsR1NVTkJWRVV0TFMwdExRbz0KICAgIHNlcnZlcjogaHR0cHM6Ly9sb2NhbGhvc3Q6MzIyMjMKY29udGV4dHM6Ci0gbmFtZTogc21hbGwtY2x1c3RlcgogIGNvbnRleHQ6CiAgICBjbHVzdGVyOiBzbWFsbC1jbHVzdGVyCiAgICB1c2VyOiBzbWFsbC1jbHVzdGVyCnVzZXJzOgotIG5hbWU6IHNtYWxsLWNsdXN0ZXIKICB1c2VyOgogICAgY2xpZW50LWNlcnRpZmljYXRlLWRhdGE6IExTMHRMUzFDUlVkSlRpQkRSVkpVU1VaSlEwRlVSUzB0TFMwdENrMUpTVVJTUkVORFFXbDVaMEYzU1VKQlowbFZSV0ZRVmtWb1lYTXljazV3TUhSd1FXVndiR3MyVUUwMGVtWnJkMFJSV1VwTGIxcEphSFpqVGtGUlJVd0tRbEZCZDBaVVJWUk5Ra1ZIUVRGVlJVRjRUVXRoTTFacFdsaEtkVnBZVW14amVrRmxSbmN3ZUU5VVFYbE5hbU40VGxSTk1VMUVRbUZHZHpCNVQxUkJlUXBOYWxGNFRsUk5NVTFFUW1GTlEzZDRSbnBCVmtKblRsWkNRVzlVUkc1T05XTXpVbXhpVkhCMFdWaE9NRnBZU25wTlVrVjNSSGRaUkZaUlVVUkZkMmh1Q2xsWVNtdGFWelZzWTJwRFEwRlRTWGRFVVZsS1MyOWFTV2gyWTA1QlVVVkNRbEZCUkdkblJWQkJSRU5EUVZGdlEyZG5SVUpCVFVsSlJUZDNTVko0YW1FS1lsY3JkbGRKVEZGMVFqWjZXa0kwYnk5dlUwTk9hV3AxVVZaR1F5OUZZa2h6UkV0SlpqVk5jSHBuV2xNclEzRlRiRkJUWW5NM1VrTnZMMkpQVlZoQ2NBcExaRTUyYzAxbE9XODJSWFJHU25aVGQzTXJUbEZwWTBkNVRFc3ZOMkZUWm1od1FrWXlMM1I2YW1VellVNDBNaXN3UzJSYVJHODFUV051V1cxUFlWWTRDbXRGZUdoUmJtcHlRa1ZRZDA1aFJXMTJNblJ5VEdsamRHMVhZak5CWmpGS1ptZ3JVMFJrTmtoNlMxWmtTMkZXWXpkMVIyTnZPRXRTTDJ4eFJUQk5VR2tLUmtJMGNsRmFibkp2TkZNeE1YVlZTV3g2Y2tjMlZGZHZaVk5VTVhscldYbFNhek54UW5sTFNHMTJObkJVYVdoS2EyWldjWFJMUzBSV056ZFRhalpJT0Fwa2VuSXZkbTFXTWxwU1l6aFZTVGg0ZDNZMFpWZ3ZialF6Y1hwd1VITkpOVTE0YTBGSFEwdGtTVFZGYjNsdlNITnRUV05KY2tRMWQxTmtaa05IZDNGNUNrZDJVM2RyTlRCRGFpc3dRMEYzUlVGQllVNHhUVWhOZDBSbldVUldVakJRU
VZGSUwwSkJVVVJCWjFkblRVSk5SMEV4VldSS1VWRk5UVUZ2UjBORGMwY0tRVkZWUmtKM1RVTk5RWGRIUVRGVlpFVjNSVUl2ZDFGRFRVRkJkMGhSV1VSV1VqQlBRa0paUlVaUFVrMVVUbkYxZVdGUWFFUlFZMVppU0RaeFpsUTNlQXBCWkU1MFRVSTRSMEV4VldSSmQxRlpUVUpoUVVaSlZVUmpkVGhyZW1KV00zbHhlR3hXVlVvM2NFZDBUbEF6UTBwTlFUQkhRMU54UjFOSllqTkVVVVZDQ2tOM1ZVRkJORWxDUVZGQlVYWkdPRlJsWkZCbEwyRjFXV3hOTTJ0MGFXOXNSbHBJY0hwU1NHNVFjSFpqVTNoT1dVMDNaRTVJYmpCRk9XbFBSRmxZSzFBS1VUWkJaMWxHY0U1WFJ6TjZWMGRxWkM5alFYSndWWGhqYjI1Q1YyRndXbXBDV0hoS1lTOW9MMGh0U2t0cGRIUkRWbVJHTTJOVlEwbHdLM1ZaZFZZeVdncHNSM2w1VG1NNFpIUlZhbkZLWm1kM2NWa3JRelp4Wm1GVVZtUTNTbFpSY2psWmN6WmhUV2w1VEVaRWNsaDNOMVpGV1U1Q1dXWlVVRXdyU1dSR1V6VkRDakl4U1hOWFkydzRiR0pMYTNSVFlXeFZUbEYxTlVOVlRGVkJUM0ZoZFhKcFJDdDJRVGwzVFc4d1pFUnRNR01yUVRaTFVFSk5PVlZ6YXk5dE5sRlhjbXdLV1RSamIxY3dUMUFyWkc5QmVscE1lVXBUUkRRM1VVZFJXVFZPWkhCNlJtNWxSMm8wWTNwTVNGZ3lNVkp1ZGtjMFNGVjJWbHBqY1RaR2RtRlBaaXRuUndwelJHYzVlbXNyUnpSb1YyaHJOR2xaVjBkblFuSlRVM1p3UjAxWFp6WTBSUW90TFMwdExVVk9SQ0JEUlZKVVNVWkpRMEZVUlMwdExTMHRDZz09CiAgICBjbGllbnQta2V5LWRhdGE6IExTMHRMUzFDUlVkSlRpQlNVMEVnVUZKSlZrRlVSU0JMUlZrdExTMHRMUXBOU1VsRmNFRkpRa0ZCUzBOQlVVVkJkMmRuVkhaQmFFaEhUbkIwWWpZNVdXZDBRelJJY2s1clNHbHFLMmhKU1RKTFR6VkNWVlZNT0ZKelpYZE5iMmd2Q210NWJrOUNiRXcwUzNCTFZUbEtkWHAwUlV0cU9YTTFVbU5IYTNBd01pdDNlRGN5YW05VE1GVnRPVXhEZWpReFEwcDNZa2x6Y2k5MGNFb3JSMnRGV0dJS0t6TlBUamRrYnpOcVlqZFJjREZyVDJwcmVIbGthVmsxY0ZoNVVWUkhSa05sVDNORlVTOUJNVzlUWVM5aE1uTjFTbmt5V2xwMlkwSXZWV3dyU0RWSlRnb3piMlpOY0ZZd2NIQldlblUwV25scWQzQklLMWR2VkZGM0swbFZTR2wwUW0xbGRXcG9URmhYTlZGcFdFOXpZbkJPWVdnMVNsQllTMUpxU2tkVVpXOUlDa2x2WldFdmNXeFBTMFZ0VWpsWGNUQnZiMDVZZG5STFVHOW1lRE5QZGlzcldsaGFiRVo2ZUZGcWVraERMMmcxWml0bWFtVnlUMnNyZDJwcmVrZFJRVmtLU1hBd2FtdFRha3RuWlhsWmVIZHBjMUJ1UWtveE9FbGlRM0pKWVRsTVExUnVVVXRRTjFGSlJFRlJRVUpCYjBsQ1FWRkRlVmQwTlZwRU0zSmlSelV4VVFvNWJYUXdRa0YyWWtoTFpFSkhaMjF5U1VVMVRXNVpWMjh2ZEhkTE5pc3ZTVFEzVUhoUlNrcEVUMlV3YjNCUmEweFBSSGxrZWtWM1VIbEhUVkEwTTNONENrRm9RVXcyZDFGSlYyWnZRbkZ0UTNOcVNIQnphVlV6WmtWYVIwOXhOWHA2TjFkT2FUVnFkRzFyYUhCVGVub3pXazV2UjNONVFsUklRMmxuWms5RWMyc0tSM0J6VW5JeVlVTldUVWhZVjJ4cVIydDNWV1oxYTJsNVdIZGxWRWhIY2tGc1ZHbE5kSEVyYWtOME4wTnZLMVZHTURSMFZVVkJNVlpIYWpsT1QyWlNiUXB3Tm1wNU9UVmxWa3B5V210a1ZrWjJVVXRTZURSbmN6SnRjRmcwYm5CSGFGaEtVRXhRWWsxNFJYRXhOekp5YWxacWJXcHhSalpQVEdVMFlWQlJkMmc1Q2tONWNHZG1VakJDTTJNNWVuVTBNbEJuU1RGdFUxbzFVSFJQSzJwYWJTOXVkbVJuWmtaSU9IbFJWbkl3VGxsNmJIVlFTMlZSYjBkdlRGcG5NV0k1ZFRZS2RuZFdZblV3TWpWQmIwZENRVTk0TUdZemExTk1iVU5KV0dKMVQzaDFiek5FV2sxblFtRklOWHBDT0VkcVdWRkpWbVZuVkhBNFJVTTRiV2QyYzJKMWFncHBTRnBqYUZGUVpqRmpWRXhhTW1ocVRVWXZLMDFWTTJSVk1YRnBVMlJwYUVFeU9HOWhZM296VjI4M1FqTnJWVzkxWlU1a1RERndhMjUxVFVSak1VTXpDbEE1TnpGcksyNU1UVU5YWkRkRVJYQlNjMnRKYVVWWFRYUTFZMkU0YkZwR2QxcFJjbm8xYUVGVFoxQlZPR2QzYlN0U2FHaDBTRWR5UVc5SFFrRk9TVklLTkVwUFZsTlpWR3B2U1U1M2JqQnBZak55TUdKdlQwWlJjWGxWYTFZME5XSm1ZM1JOWXpoeGJuZ3dZV3gxUTBaQ1ZEQmpkR2huWVVWSWRsUnBXblpHUXdvd01WVkhXRkJTU0VnMFRYZGlabWxEY25ob1RFOWhURll3WWtkYU9TczRWVE5rWVZGT1NESlNSakZ0U1ZaT0wwbFlSRzlHV1VSNWRsRnJVbTkwT0dGUUNuSTBWRWhMV1VJd05HVkNTV2xzUzFOUlJVRXpiVUZSTjNsM1JqZGFRMVJxTW05UFdHSndla2hCYjBkQlNVNXFUMGhJVGpaSVl6ZFVkRzl3VHprMWNFOEtUMGhJY1ZadFdIUkNVMHBVTld4UFEwYzBjMlpqY1VOSFRFSk1NRVJoZWxvclFtUkRTWGhsYkVOdlJqTkRMMnMzWW5od1VXNTRRbll3T1dSWWFYUkNWQXBQWXpOVVdreFhNM3B5ZWk5ek0zWkZNWFZFVEVGNlQxaElkRWxNTkhSeFVqbE9ZMGxvVTBoRmRtNVZRbkZ3UzJoVmNYWnpkMnA1WVVKR1QzQTVia2xoQ2xGbmMxUXJORXA1ZUdKV0wwdHNSQzl4V0U1d1RHRkZRMmRaUVRoTkx6bHRha1ozTjJnemFsTTRiek5VYnpCblkySnFVMWhJYVZaUFUySkpSMFJHV25FS01IZG9SVkptTUc5V1FXOUlSR00yU1cxYVZsWm1USFpoWkhGQlJpdEtOMVZQVkZObGJGWjFSVzR6WVVWNEx6aHBUMVI2Vmxjck0zbDBhWFZIUTNwaVp3cGlVV1FyUkZCNmFVaGtOR3hJUTJwRFVVSlJjV3RDWlhaMk1ITmllbk5aUVdsWmRIUlZl
RWxCTUhkc1FsUk5VelZKY2xkblZWQnhSeTlFVUdoR2NWQnFDa2h3TUVkMFVVdENaMUZEZDJScU9FOVNNa1JST1VKNFZsSTVlakZ6TUVadlNHMXJORFJRYURGdFVFdDBiVlZtTVdab1pFaEROMnRzTWxWVVJtNXlWWElLZDFoeVdXWnFWalJzU0hWamFpdHZNa2xOVWpCR1ZDczRRakpaUzBGV1RHTk1aRVp5YVdKdFkwY3haSE5yUkhSMGJYQnhkV3BMWTJ0MU9YQXZlV2RtTkFweGQwWjVRV0pqUjBJMk9HdFJaVnBKU1hWWU9UTTNVekpSWTNBeFVFeHdWamwyWm05SlVVUmtSVlZCTTFKQlRqQlNSQzgwWmxFOVBRb3RMUzB0TFVWT1JDQlNVMEVnVUZKSlZrRlVSU0JMUlZrdExTMHRMUW89Cg==
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: kube-apiserver
+ namespace: shoot--foo--bar
+spec:
+ ports:
+ - name: kube-apiserver
+ nodePort: 32223
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ app: kubernetes
+ role: apiserver
+ type: NodePort
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: kubernetes
+ role: apiserver
+ name: kube-apiserver
+ namespace: shoot--foo--bar
+spec:
+ progressDeadlineSeconds: 600
+ replicas: 1
+ revisionHistoryLimit: 0
+ selector:
+ matchLabels:
+ app: kubernetes
+ role: apiserver
+ strategy:
+ rollingUpdate:
+ maxSurge: 25%
+ maxUnavailable: 25%
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: kubernetes
+ role: apiserver
+ spec:
+ containers:
+ - command:
+ - /hyperkube
+ - apiserver
+ - --enable-admission-plugins=Priority,NamespaceLifecycle,LimitRanger,PodSecurityPolicy,ServiceAccount,NodeRestriction,DefaultStorageClass,Initializers,DefaultTolerationSeconds,ResourceQuota,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook
+ - --disable-admission-plugins=PersistentVolumeLabel
+ - --allow-privileged=true
+ - --anonymous-auth=false
+ - --authorization-mode=Node,RBAC
+ - --client-ca-file=/srv/kubernetes/ca/ca.crt
+ - --enable-aggregator-routing=true
+ - --enable-bootstrap-token-auth=true
+ - --http2-max-streams-per-connection=1000
+ - --endpoint-reconciler-type=none
+ - --etcd-servers=http://etcd:2379
+ - --kubelet-preferred-address-types=InternalIP,Hostname,ExternalIP
+ - --insecure-port=0
+ - --profiling=false
+ - --secure-port=443
+ - --service-cluster-ip-range=100.64.0.0/13
+ - --tls-cert-file=/srv/kubernetes/apiserver/kube-apiserver.crt
+ - --tls-private-key-file=/srv/kubernetes/apiserver/kube-apiserver.key
+ - --v=2
+ image: k8s.gcr.io/hyperkube:v1.12.6
+ imagePullPolicy: IfNotPresent
+ name: kube-apiserver
+ ports:
+ - containerPort: 443
+ name: https
+ protocol: TCP
+ - containerPort: 8080
+ name: local
+ protocol: TCP
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /srv/kubernetes/ca
+ name: ca
+ - mountPath: /srv/kubernetes/apiserver
+ name: kube-apiserver
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ tolerations:
+ - effect: NoExecute
+ operator: Exists
+ volumes:
+ - name: ca
+ secret:
+ defaultMode: 420
+ secretName: ca
+ - name: kube-apiserver
+ secret:
+ defaultMode: 420
+ secretName: kube-apiserver
diff --git a/example/20-crd-cluster.yaml b/example/20-crd-cluster.yaml
new file mode 100644
index 0000000..9397fd5
--- /dev/null
+++ b/example/20-crd-cluster.yaml
@@ -0,0 +1,23 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusters.extensions.gardener.cloud
+spec:
+ group: extensions.gardener.cloud
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
+ version: v1alpha1
+ scope: Cluster
+ names:
+ plural: clusters
+ singular: cluster
+ kind: Cluster
+ additionalPrinterColumns:
+ - name: Age
+ type: date
+ JSONPath: .metadata.creationTimestamp
+ subresources:
+ status: {}
diff --git a/example/20-crd-extension.yaml b/example/20-crd-extension.yaml
new file mode 100644
index 0000000..6fcb01c
--- /dev/null
+++ b/example/20-crd-extension.yaml
@@ -0,0 +1,120 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: extensions.extensions.gardener.cloud
+spec:
+ group: extensions.gardener.cloud
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
+ version: v1alpha1
+ scope: Namespaced
+ names:
+ plural: extensions
+ singular: extension
+ kind: Extension
+ shortNames:
+ - ext
+ additionalPrinterColumns:
+ - name: Type
+ type: string
+ description: The type of the Extension resource.
+ JSONPath: .spec.type
+ - name: State
+ type: string
+ JSONPath: .status.lastOperation.state
+ - name: Age
+ type: date
+ JSONPath: .metadata.creationTimestamp
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ type:
+ description: Type contains the instance of the resource's kind.
+ type: string
+ providerConfig:
+ description: ProviderConfig holds the configuration for the acting extension controller.
+ type: object
+ required:
+ - type
+ status:
+ properties:
+ lastError:
+ description: LastError holds information about the last occurred error
+ during an operation.
+ properties:
+ codes:
+ description: Well-defined error codes of the last error(s).
+ items:
+ type: string
+ type: array
+ description:
+ description: A human readable message indicating details about the
+ last error.
+ type: string
+ required:
+ - description
+ type: object
+ lastOperation:
+ description: LastOperation holds information about the last operation
+ on the resource.
+ properties:
+ description:
+ description: A human readable message indicating details about the
+ last operation.
+ type: string
+ lastUpdateTime:
+ description: Last time the operation state transitioned from one
+ to another.
+ format: date-time
+ type: string
+ progress:
+ description: The progress in percentage (0-100) of the last operation.
+ format: int64
+ type: integer
+ state:
+ description: Status of the last operation, one of Aborted, Processing,
+ Succeeded, Error, Failed.
+ type: string
+ type:
+ description: Type of the last operation, one of Create, Reconcile,
+ Delete.
+ type: string
+ required:
+ - description
+ - lastUpdateTime
+ - progress
+ - state
+ - type
+ type: object
+ observedGeneration:
+ description: ObservedGeneration is the most recent generation observed
+ for this resource.
+ format: int64
+ type: integer
+ state:
+ description: State can be filled by the operating controller with what
+ ever data it needs.
+ type: string
+ providerStatus:
+ description: Provider-specific output for this control plane
+ type: object
+ type: object
diff --git a/example/20-crd-issuer.yaml b/example/20-crd-issuer.yaml
new file mode 100644
index 0000000..87a2f99
--- /dev/null
+++ b/example/20-crd-issuer.yaml
@@ -0,0 +1,45 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: issuers.cert.gardener.cloud
+ labels:
+ app.kubernetes.io/name: gardener-extension-shoot-fleet-agent
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .spec.acme.server
+ description: ACME Server
+ name: SERVER
+ type: string
+ - JSONPath: .spec.acme.email
+ description: ACME Registration email
+ name: EMAIL
+ type: string
+ - JSONPath: .status.state
+ description: Status of registration
+ name: STATUS
+ type: string
+ - JSONPath: .status.type
+ description: Issuer type
+ name: TYPE
+ type: string
+ - JSONPath: .metadata.creationTimestamp
+ name: AGE
+ type: date
+ conversion:
+ strategy: None
+ group: cert.gardener.cloud
+ names:
+ kind: Issuer
+ listKind: IssuerList
+ plural: issuers
+ shortNames:
+ - issuer
+ singular: issuer
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
diff --git a/example/20-crd-managedresource.yaml b/example/20-crd-managedresource.yaml
new file mode 100644
index 0000000..975b1f3
--- /dev/null
+++ b/example/20-crd-managedresource.yaml
@@ -0,0 +1,36 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: managedresources.resources.gardener.cloud
+spec:
+ group: resources.gardener.cloud
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
+ version: v1alpha1
+ scope: Namespaced
+ names:
+ plural: managedresources
+ singular: managedresource
+ kind: ManagedResource
+ shortNames:
+ - mr
+ additionalPrinterColumns:
+ - name: Class
+ type: string
+ description: The class identifies which resource manager is responsible for this ManagedResource.
+ JSONPath: .spec.class
+ - name: Applied
+ type: string
+ description: Indicates whether all resources have been applied.
+ JSONPath: .status.conditions[?(@.type=="ResourcesApplied")].status
+ - name: Healthy
+ type: string
+ description: Indicates whether all resources are healthy.
+ JSONPath: .status.conditions[?(@.type=="ResourcesHealthy")].status
+ - name: Age
+ type: date
+ JSONPath: .metadata.creationTimestamp
+ subresources:
+ status: {}
\ No newline at end of file
diff --git a/example/25-rbac.yaml b/example/25-rbac.yaml
new file mode 100644
index 0000000..400dfed
--- /dev/null
+++ b/example/25-rbac.yaml
@@ -0,0 +1,23 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: gardener-extension-shoot-fleet-agent
+ labels:
+ app.kubernetes.io/name: gardener-extension-shoot-fleet-agent
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ resourceNames:
+ - gardener-extension-shoot-fleet-agent
+ verbs:
+ - get
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
diff --git a/example/30-cluster.yaml b/example/30-cluster.yaml
new file mode 100644
index 0000000..82f5a4f
--- /dev/null
+++ b/example/30-cluster.yaml
@@ -0,0 +1,28 @@
+---
+apiVersion: extensions.gardener.cloud/v1alpha1
+kind: Cluster
+metadata:
+ name: shoot--foo--bar
+spec:
+ cloudProfile:
+ apiVersion: core.gardener.cloud/v1beta1
+ kind: CloudProfile
+ seed:
+ apiVersion: core.gardener.cloud/v1beta1
+ kind: Seed
+ shoot:
+ apiVersion: core.gardener.cloud/v1beta1
+ kind: Shoot
+ metadata:
+ generation: 1
+ name: shoot--foo--bar
+ spec:
+ dns:
+ domain: foo.bar.example.com
+ kubernetes:
+ version: 1.18.2
+ status:
+ lastOperation:
+ state: Succeeded
+ observedGeneration: 1
+
diff --git a/example/30-extension.yaml b/example/30-extension.yaml
new file mode 100644
index 0000000..b4b41a4
--- /dev/null
+++ b/example/30-extension.yaml
@@ -0,0 +1,11 @@
+---
+apiVersion: extensions.gardener.cloud/v1alpha1
+kind: Extension
+metadata:
+  name: shoot-fleet-agent
+ namespace: shoot--foo--bar
+spec:
+ type: shoot-fleet-agent
+ providerConfig:
+    apiVersion: service.fleet-agent.extensions.gardener.cloud/v1alpha1
+    kind: FleetAgentConfig # assumed kind, mirroring docs/usage/register_cluster.md and example/00-config.yaml
diff --git a/example/controller-registration.yaml b/example/controller-registration.yaml
new file mode 100644
index 0000000..9e61f8c
--- /dev/null
+++ b/example/controller-registration.yaml
@@ -0,0 +1,18 @@
+---
+apiVersion: core.gardener.cloud/v1beta1
+kind: ControllerRegistration
+metadata:
+ name: extension-shoot-fleet-agent
+spec:
+ resources:
+ - kind: Extension
+ type: shoot-fleet-agent
+ deployment:
+ type: helm
+ providerConfig:
+ chart: H4sIAAAAAAAAA+19e3ejSLLn/O1PwXXvPXdmTlsG9Ki29/SetSxAwgZZCSSCPXv6JA8LRPIoQNZj5u5n30gk2fKryq7uWzUz13mqbUgyIyMjI34RkSTuGSmDMAvLk3BVh1kV59lJFeV5fXJLw7A+IbMwq0//9PsKD+VTt9v8hvL0d3MttDuC2BV7PVYv9HqfPv2J6/7Ocd9UFlVNSo77UwmT/lK7rz3/Jy2zt6x/KwppGs+yvAy/ZQy2wL1O59X1h2V/vP6iwLc7f+L4P3qyL5X/5uv/E3dD6joss4qrc267xtwyCjPOW8Q0iLMZVxA/AT2oWkc/cWYUV1y1KIq8rOEC9IJyM5p7XEpqP4LWP3NlSEkd34XQr44O6kkWAIEsnMHTPOP+XJThbbwKA24ZQ7t/+0uLG2d0zeVZ05OxxBVhydE4C1tHrYHxm1EDb0DiMk9TIIAvDS6Iy+qoNYvr0+bnlv2jlrcpT5uf+4podsp+7G+ru+z0gZAH81sU3G1Mw+ror61qWcBPjyTws07h+v9BU0zKOF9U3GggwYBFmc9Dvz5qxUFITrftoOqodVf5eRCeHv3oVX17eZP9X0akrFtrktJvGuNr9i+Cb3hs/3wPIOHD/r9DIUWMw5Kt+zl3JxyRoji4bfEt/iQI746CsPLLuKib+gtuCO6A85lScLd5ydVRyCk7PeIMpj2czLSHu2Daw91rVusoI2l4zr1F547uXmDjRwvrX7C8yf6D3G/N8m8e4yv2L/LdT0/iv0+fWPz3Yf//9eX0FNxgsQZPGdXcn/2/cCIvnHHGxQ1nSBzYNsmaG3IL7jEmdcj5eVqQbN3iLsD1N90qcPlVWN6FQWsbHzBPysFvGvugUuDhF1kQbmHiAoIJ+GXkt/WSQKRxvW3yM3fX4kRACj8sao5UXJbX0C+HLuUyroBa1nS/Hl1KOjDGRjg6PYV/ewovDHJPe4donNjiuT+zBse7R8d/+Z+MxDpfQJyyZoNyCxisvp/EjiEYnU0bBJD54TZeqR8GaDEazo5G7tUEmhPoUMDd7WFDjtQ7ppsS1XVxfnq6XC5bpOG4lZez053QqtPdXE+A610vK4MIhUn78yIuYcbemgO8hg7EA14pWTYLNitDeMaCuYxblhAUseCr2gmckQniqi5jb1E/EtqeR5j6YQMQG6jA8YXBjYxjrn9hjIyfGRF7ZA7HlsnZFwhd6OZIMrgx4i7H+mBkjsY63Mnche5wVyN98DMXxmwlQZwQ9MEMgM2YiRM0htEywvARC3ufUhWhH9/GPkwtmy0AirhZDm4ha4LSsEzjii1r1USWQIbGaVw3wWX1fF6tI2gyy89nDOyYHrdap/DvLsyCvDyF2DFaeC1Y5NM9ID5cRBAfnu77nfh5Vpc5pQCZZThjkmqGbFUR9wUI5Vq7AbGEDJDP7i5cEZBDePoaURZycdKe7PlzL8nmfbONz3cOOcyYNlTc4Vx3AXsj2F0lkyETj5+XJYSy3AMH3CMOjopD6h8e+F+tvMn/1yFoKShS9W07Qe/e/4H4/1P3Y//ne5R3rv9vkPED9Fatunh7LvjV+I9/mv+1ReFj/+e7lL/97YQLwts4g6iIZWfH3Ml//ufRmzI01hXcZ9Ph6JBO8xgcym0825I7OTk5Okw0n9E6YfEjOOrW/XhVa0ugtWel5dN8EZzeCYQWERGOkjgLzrd5ZpNmXjbNj3yIKpqbDLwaG+yI45KFF26pnXN/+xvXwoQuwqrVjK+RDDgoWw9tOGCYzSa+fbklJV5IK9Zqe/U6zV3Lv0Mw5jBHviPMZPb4srm+F1+cQu+t4Dhux0lEqptmv4w7riIidnvnx/djNu1bNWk43/Yoyjirb7njf6/+979XT1uWYZFXcZ2X6y+RAM7DlwiefzPBl6e9nzWMkEO0uvYpqapd8r8Vwk4EVZhC9SVLPyBxOP5fv3JCS+ic8MDNJcTPHuQndQxDX8FK7jStpcT1PvhnlCoIsoMFhdinlfwCTOagTkf3k31tDOF3j+GFNTkY6LVmO80+tKtXbYyGBCLckG61PA62ovqCuZ5se5zsuxyS/tEg9FF+WHmn/99CZEqKk8bC70CX8vKEZWUs1Qxf3iP+6vuf3hP/34b2wof//x7liadrVhU3qzreLyqD6ifbxFvnu3W5GimOUgC4gNSEedvtHi/4xDjz6SK4jytaQOdlrdl1qiDV3PZsIUApAkm7vq/eupGdwz1iuyekKBqnXWZhHTYA+qWBX+kSZ7D62Uujsj77GTVMV7891vJz7u9HB47/RcH9HVgJWPbfeez5fvSiH5R32n8QFjRfp1D5jtdBX7H/bvdT+2n8/6nb/rD/71GehuZgI9XpvYUP7pf7BRN/U5LwR5r2GwfkOHZcoVVFp82e1Tv6vRcg2AYlY7kM72JGdhhXLAq+ZhuR5xzfPGn2Zx8nCbvKy3wB0NDMvmqisrzczr95Z359IJA/QCTvnxzH7c1+x9WBAjT0sizfbbfuqzgOolo/qRbpaRX6ZbhnY5fdnRwkYfcY/ShZbAFkbvMboLGHbVbe6KaesfEQrzwI6EUf9IipPzfpDvc/WuZOAq0+SOaGnWc4flMIdPyXL09llwBtK+ijlf5D1vpbVpvj9urMyj4du2TpmP7e8XcLfuH7TMff3Z1tRhNIc8p7qZy8D3O2pVmfx9q2y61bhwvS1N0sKL3JwSzXj0x1m8cW9w8fKVmepgRQ8r7ihDt9F38n3MnOJH49DWv/9JGx7PSsUadHPVKyYr38RVmytK5k2xY+O7vy6wHfD9v5zfWusbHO/OpwCoxeFBJaR43FvJ/2QeevjRPEFXszcfAC5RHV3ePLh6dgQPM8zrjjn4+f0toe5DnJi3D7muLkAYxe43TbZbzvcXHf4SntJ3nySRz8eqhBzxPvR8oUZneHCrHV2mvpYiCh36Rr6ZK9HPtNv9Ak4+biUrpvyXF3jGW5zNPzg0qOu41DGqDw9nHtrp4h0vk9MLfuPe23wuae35F2oUgYmB2j38ZYQjYamc94Pee2/vUgMD59MVJ+HfmeMViGVb4o/fCR8txXnrOAe7eX9rzH37m6jNOHmFvgv4i6MIucLtJQYwhVPV+zlzzXgQhS1m0r/+e2e9CuBHVh59rOgbvF716YrzixV/h7tkxv4++RuLbCeobHX5TSNgY41Nxtjf5kLq9vBz8i+C6ZvVdi/j6XPWT31YzyC5nsvgThLVnQWssDINER+YNJ7AT7o0P/prwz/9vWHrimt4zxtf2fNi88yf86gtD9yP++R3lxY8dozPSFlO9dNvuWxG9P/kCjvpIeeL1OmPl7tP+H2kv5ZyzvtP9Hb2neCgBftf/e0/f/nU7v4/zfdymH9n9ody+/jmtC3S1G3BwmiN+6O7QLJIX7cyBH7GMCQgdb57kLSh6dPj7eD8w1rD0/gHw/3gvvmVvHP1re/2jlnfZfesTfNfDD8gH0v4gEX7P/Hv/0+5+e+OH/v095uv/LFrhFFnUENrbZHgF8eFW+e+1DQWZhiXIafilEqJ4c3Tj/8pbIH7oB/C27X8/7pM
05juDEWz/uZWyVnnUsF5QlRSfQO1bKfFE0Mzjhjo+PHiWuTd02+6nYI0BUb1/91+MfmAp8g/2/+0Ogr9h/+9Mz+xc7nz6+//ku5Q+3/zfa6D/l+57XrP1VzHsOAv5WeNVjDJiFdfObxtX2YsleAf2ugR6aPrk9hRnWi7cxwK6K+6tFAUsdvg3s7pO/b5wp3B5MdquCL0gThJmn+8rmbFRc7+b8wqAAv6TZpLmfy8MEn3HwqiW8uqhlTsPqaYUHNhNns239Q4snj97JLLsKQDnfuhrH9y+tGu/z2BfBXXgHJvHwqHFv+9siD548IdvXOi85sufcsBfKL3H0cJLgKZ3fKwA/z0uQ65eXrDHsJ5LfDfg7Ce7rm32O7bM3vT84YORgunth7Mzkd6F1f6tv/91AG2a+e4GyX9cvCA5aPfdy7xJTtfDYx+GNn9hvpx2+DX2v0N+yjfbt/v+d8d9jBHhjJPi1/K/zqfN0/6fHf5z/+S7lKaIc7AE/UtqP4z/PkeVHr90fUd5p/3cFef/fgfiK/Qud9tPzf2K7y3/Y//coT16qsvXdfkEaPDn1ewzeMq988vzLBvF4BxnQto6hxU0eXOwah+U3IscJcPJG9NgfW3phKvtY7PDwzuO6LeTcHzhqKuPD99z3j7Zvrf/jr/9x/5I3jbMLSvNleHAGCGgVi4bZ+8+0j1/nqPVAogX92MfX+27HX5jI024HJwXYOb00L9ffxMK267dwsev59GMfjgPjghD2/gDLS0dNWf2z46YPr9/fCN/bmPlwUbc12xfwB4eC2EQOG7ce2v3DntL+rytvwv+7reS+9Q8AfRn/hXZP+PTs7399Ej/w/3uU7RnJBhb3H/Gdc3NyR1LiR+VbzzPWZHbONRECuysOzlJe0CVZV0dHj7Jlhi7QdIuyDV4ed/mUbQPco9dxr6PFrKb5qw5fbCmIv7CmR2DYrNnOfd0faPrpBcT/6Rl6//SA3QLPp/c1+1GaQb4KM8fM7wErBwcPd6cbDk9GAjWofPVA4jl3S2jFWD84XXnvp54SOjr85PTp167OtF/gFK99kd5583wWzKUrR1wJfhtRP9N6lzE/u7+f51ejy4vZSNHhGSrclM6dKaLXtiwESnTnp5QPDSGBumgc9zVr0+dNWVtNBBW7UmERS5Us6l5hrGM7KZAmudii2jKQVqY2RAOyka8sAWONqrKZIGTLqmTP+4om6NhIVKIpKwPbdeXYWB5PXaKlk5VjC8SgztIb4gi1kWLRyEAKLlFyhjwbrR0hyP1Uxs4GJ2GiDizsKiFV4TlG2ETXWIb7dLI0qTrFmP3nKgirjivVkaessIWDBImoN5nDJVUvXctaYYwcInUpEes2lqMlEijMT70xlNXYSlxblxGG3xSZkgj8LM0UV2juQv9CdIfBEmFXxolgYctdW/O+HMouxokK7WUJC+gKpZO1q3RNLAI9jC4NGQSURYZmda9suxgjDCXREYaxLIxlTdaxmeBEk5yNPVcTA2SKMtfEVvcSWo41Xq91K4o9q9s2B/0OTs6mxlyPwqTbMzY6CrH62R0WOqJqZFNETFnFKHGxNse2K7uXptRZwXqMteFk6fABrwnqAmX9GLWjK+DXCGw0DRXBmmxmKyddAZ9wbdVzZLmigyMF1mGIMkw8rE4sU72yqbY2k8BA7Qlv0qDSLX6pDTAx5KJjp/VSs6WlkRbwPNJMHhNNQLW3wTcYFwNnri6JENTatADulrzF43GI0WeUnhnWtJ87GxVrIh5a0yIi6Uo1LdcmAls72QiTztI0dTnE7nCMkYVMnLvYzTEORrbZh/m7bUwj4uMc5KfG3lRbOmmtGLxQIlpEtuzaNnWXuiKtwwHINSn4iVgJuoIWKK1NxNMurEfbT7qLhn/JvbESnNu2XhJZj0Gvrm1Trgh1h2YCumavJIePbGYfxlxFeNrfmILPh1ieBnLf0PiiA+OroVVtcFobSHZdG/tLQ1Qrf4B1W1l1LSEgOtgDkQPQia7pSlEC8xiCPsUkcUunXRiGUNiTBM21ad9yreAKdH4E+piQFE3cgYzBHhZ2KoB8oO1GBWpYwSk1SLs/tOyamKna9+W+S8wI9EfPPRp8tqfAirJc2qZKJqLcs4Geh91Ly16pIQ2GmlXopI0My+5eWcpoaWdBpGX9oWkFd2FyZpvQH6WrSzvBV5pQTFFWzEnSrR3BhRBerexMH3h2pFqmbgeys7IoGAkdgX73NwwPrGmEPZNOwb4NIsgl2KPryYUD9trWaKC4G0Q8S+g6qVD50nLpy0GCBdBzu1rhVK0xdTWNH4mANx1P1HtEVm8wL1w75sUqtISpK+EpAryxp8XlJNVrTwar38hThyLFU2RspDICPJlaYje3RFjfoTw2pG7PBbzAtmxb4OCAAgJ7uNLkoEJppONphMxpUQUpLo2pSohQjEw7WGopku2siDSsEtCHK9OWF+7QnYNcbLCTTLdlGWU69nBh2zhYeny1NhJMCW8J5rAgBHgzp3SOs/4Uz1We2MCvUiPACwVLwZVu1T2inEUY9AXLyA4TvdZMF082ycoSi0sjRcNgoCe21F3igaqHNlpYFI3ZvWtFekj1IfS3AdVWduILgBM9IumE8F1kAh4T2f1sJfUYyRLgrZtrqS4Dzk8980Kw2oUN83MswAMP8M5V6itLWq4D2TVAJibDO02Qlr4kUC9d9WwczTWMHdOUsWcVJszfDSisRqZSQgvJtZDRrH+iYsDPgZMx/ierLZ7TgT2/EDCzSBMht92/tNMiR1QGvO8yf9PBGOzDnqw8CdueRNs4weAz9MHYAjylzsZiz6lcjE0Xgb4c0OsbgD8L8A84lKqV06Y6slc3TuLehdRZTRJm+oXL8MCwujWRVpG1oaJj6ksC/sVi/mXa15y5DNgmj4Avi/CuYVHcmYDPA/7Bn6ErsDdlAm1DmUZIgrlt1Hloq0PDVHXNcnh3SDdB0rXJBtuaDT4YB7oB+q+BGwjTiPkrotFk4w9UpPFqB4vdypcnSyIVBuA5b4nClY7lBRnSuZf+snQsNDZFVOLMTTQK6wf2FkpnlpUUCUnyNeglrLg6dNvU0Aaya5n9jq+ombfpz8nGWsJ68jp2bbZemqKtMPQ3FaiRqA36dwn8uEFSLd2NbHnTyDFxlGkyGk3muutN9TVWki4WgqmZuXM0RWuGLw4uht6woIAHV3BvhLI6BNtmeACY2SUTwf0M/QF/kw3oi+FjZx0qBcU87tmm3vfBv0B7HUtuj8UDhpysAe+mSCiWWALnlEoby0Rj0E/e4TEP/rkGfMc4DTpsPQNeAHyLrsGPXZsY5YCrsD5nOvhSEYP/DGS9B/g6RvZZ15kWiilP1oCfloetJYsH/FQfWenKhjjmBvAF5C0Bfp/dwBgO+G0+AH/hDlBkK1HbgfUgomzZFOy5HYHcZfB/8ucJ2LeX9UEzfNC/QmH4HYI/gvlAvEM/o6nqekl3gvkoNwAf/HZAbQXlgMcdkupD0Ks5wsVnzAdJII+WYxxZ4O8NS1wphowlbMqWxvzQQM1CSS/B3yBsR0vLvOgiq1v6kgv0C
x78FeC6PrWTMwAv0NR0pdjYnYZD3SZiJEJ8kti06JGByvC2A0ZAgN8S4i2Cpa4FfhjWh/kHHGPAB2uuXyLwVWOp1sH2uma6GttYhfigAHxCBOxPwYouewPsanO5A7En8UWIhyTE4p2Nmbgy+A/HHSIMcYUC+nCJRVRNaBSDfvacBPOIYoXZhze9WLo4utNl3TYynUymIM+0WtmKqtgmcnEC8hHrO8AXGeIj1zMnG+gP8gqGY3tlA96YgBfgrwuYpZogQZ3gYZEEtCjtpIb4A312EqTq2FmBvcUhpWvgV7Fs5HiSO/Yg6pyIXeLbqDKpbhIRWRCvdDTpDOITiK9owfBT1pJuiadBrCWdNayPDus/9SUV9Lk7tmgwxtbZcDxVDcB3CAqxHdiqCQ/mnvLLBuK5K/CXtWH2YyIuN6CPpSHqeDzou9rgYoWnRcbiq7GpYw27ugN4hihl/hbi29napP7KtmXHylwthNjcBH2zFN32FYgHxVUPHOrYTLrTQIog/kO1aSESKHjob7Ab0tHSgaA5TATkD/tTsF8FWwHTh8+gJ2OQ7woiW8WU9AL6NvmARS0BgTOHdcSerF5ZaV0hGfxl4u7zhaWxzGcsz/BFvAZfuYZcIg+GaOlv8rtr8Wzpr3leH1ysNXEkOutu7rQvFoGIN8CX4KdnS9foRmEqlOOhtiD2Lz19MNlcxhczL5Vr18xnXru/uUz1Oy9DNByizXiW15DT7O4nPdZ2dNmfQ07BaLLx7/yhts19oM6djnojJYj8FIHPwutr260gGuIhD8pcu8u/MJbgg//yskntiGcLoNkJBvtndeGlk95IgvFTt3CXBeAHTt0ptE8hz9rks7DNXwVT/T4f29N2RRn8KV4AP7WbrsDjAn07YOMAr32ByQ/m1sgyUM5K1+4A37hrpFGMRL0wRcryrZ5hymNi0Y1hC7wPOGuYrgq5D9NtA2LpGizC1WTmewOCUjxy2lEEsdEQfOPKwXIdyoWFpO7nBisVeQq4GYMvT01YAx2iBiLS1DK6NLSKpbPRdWKdFUamgqUWlSPygqegIc4KiMe6cA/2AniN08gFuxBd2Rc0iRaBSCNNcAubRlcBhjgvXc1DEy+dLOmGgKsQP0dgLxXER2OSjjY4w8lkQwWIl1eeom0mjB7FzZxJqo5gPLBntwt+XPV5Wgfg2yB+ZfGiDXN1zCmO8CYSIZ+wIR6CeKogmHc/2+aoq9nqAux3Dvj5GfDb1qRVrdtRjIUtfY9Sx0hrgjcY5pOsPPDfYA/Aj5xDfgb2iwYa2KuWocpJk5WeIuh/Ng93a6BhWuOMxoAfS5ivHgAejIfNeF1XERgeTVFaAD4mkG91UZBqa2ifePbqM+RWKwLyA3+aYPDPpsILRGG5Br0hVrEAf3kFcedUt1dzSPRqJwlskP9AsyOIG1zBlfTfI489fybQO1i/1QKwi0z4RDTsQm/wz8RjwPvalNElwYVkThHEjxZv8sC/1ZWsVNCwQLumgBJiCRL4Y5fF7xDnMv4W5lx2icXodce6rW1AH2I87PeYfmipPAW/4oI+fHbmOpO3BHgWG7Lbc6UA+Nd7vtJ1bQlwnPqgf4BPthCDPlSuxOSXrxv94fPudSqMkRANIWbuanLUN01Xs01qmJIawXVJ6Bkyhn0Fsq/It7odO6GuCb44SAVsSODLTbmnS/haM6MxxEqqPYDsTIqGZjtKvCHFtiXfWvZMwO3gltB+FihqMZYSUU/12IdIdSrmK+gH0UHRMWwd4le/O233I50HqlZxF4LPCyw9DjNVQ3P9FvOqzHYXLEHNbKvGxEJzWBsbcDG1bDwZ40kdYJC0HSROUlyHWV/yzcgN+GgCsdA8SNUuaQOOmJGl2bo+FYJLbUhrwgfoehpdOpZ6hYYF6Il8Bx5laeFJRx+65Rj/stJlKnmCXAOWi1qyAjTV0cSKPvtmAHG6Jk6m6txOzyYG5P3g6FeQD4+Q4ir+IEgndr7B8kSYpDLWQQewqN+SDXKuseoEsrqxM6djpZS3BvjGgnwY1nRNpFqxYwFkH43IVOeRcKb7U4jRqAv+OYrHVmC6NNh4tFhAPp9BXmR6A/CjeLSyBxCHUXU0HtC5s0l40s7XuuLyviK0J5A36MC7MUAb115pKOtXHl8XtqzVfobavmJ1jKmcGGIwMCXZtdPupaYIt4YAcQWWBF+Sx1iM5NCWO1qCKxNyUQPGC2WrY9qqAXFhBvFiZE/Va4iLu04y6WqZKoANl4gHOhDkOaJ+7Quz2p0H84mAXDPrX7qpMCLDiw22Im2SqldYAslbHRHmsLq28WdTiu4Av4mergbXZnCjJUUfcoRLrR2NwwxPvMFMMIbqyqejlQu5kJH80vHm8ohIk2VgB244dNaoiUsiB/heYFlXIG/umFTuBjSS/TkeO4Iao2lwaybCnWlFoicUIhYFkOGsrYkqm98dxNl8YFtdN3UNiJMzsEfDplZ3KowED1MJ9COGHKyL7RVCQ3dh8GihU7VzjfX+td2FTNeVzam6sRQhCQcXdZjygp25KLT08nKW/3pwGuDo+Ufh59z/+b9HRz9xL339yf4kD9u13X6FfN5cP/5QdXuiu3nBtN3e3W4Mo4O975f+FPArnQ+3zMNFa+aX7KXo/Zb57v8S8fDN5P5P/G4JlTsqzd75Md8SW2c/8iD4R/koH+WjfJSP8lE+ykf5KB/lo3yUj/JR/mXL/wdqr+T5AHgAAA==
+ values:
+ image:
+ tag: v1.0.0-dev
+ fleetManager:
+          kubeconfig: # base64-encoded kubeconfig pointing to the Fleet manager cluster with read/write access to Secret and cluster.fleet.cattle.io resources
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..576016e
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,46 @@
+module github.com/javamachr/gardener-extension-shoot-fleet-agent
+
+go 1.15
+
+require (
+ github.com/ahmetb/gen-crd-api-reference-docs v0.2.0
+ github.com/gardener/etcd-druid v0.3.0 // indirect
+ github.com/gardener/gardener v1.15.1-0.20210115062544-6dc08568692a
+ github.com/go-logr/logr v0.3.0
+ github.com/gobuffalo/packr/v2 v2.8.1
+ github.com/golang/mock v1.4.4-0.20200731163441-8734ec565a4d
+ github.com/karrick/godirwalk v1.16.1 // indirect
+ github.com/nwaples/rardecode v1.1.0 // indirect
+ github.com/onsi/ginkgo v1.14.1
+ github.com/onsi/gomega v1.10.2 // indirect
+ github.com/pierrec/lz4 v2.5.2+incompatible // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/rancher/fleet/pkg/apis v0.0.0-20200909045814-3675caaa7070
+ github.com/rogpeppe/go-internal v1.7.0 // indirect
+ github.com/sirupsen/logrus v1.7.0 // indirect
+ github.com/spf13/cobra v1.1.1
+ github.com/spf13/pflag v1.0.5
+ github.com/ulikunitz/xz v0.5.7 // indirect
+ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad // indirect
+ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a // indirect
+ golang.org/x/sys v0.0.0-20210123111255-9b0068b26619 // indirect
+ golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect
+ golang.org/x/tools v0.1.0 // indirect
+ k8s.io/api v0.19.6
+ k8s.io/apimachinery v0.19.6
+ k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
+ k8s.io/code-generator v0.19.6
+ k8s.io/component-base v0.19.6
+ k8s.io/utils v0.0.0-20200912215256-4140de9c8800 // indirect
+ sigs.k8s.io/controller-runtime v0.7.1
+)
+
+replace (
+ k8s.io/api => k8s.io/api v0.19.6
+ k8s.io/apimachinery => k8s.io/apimachinery v0.19.6
+ k8s.io/apiserver => k8s.io/apiserver v0.19.6
+ k8s.io/client-go => k8s.io/client-go v0.19.6
+ k8s.io/code-generator => k8s.io/code-generator v0.19.6
+ k8s.io/component-base => k8s.io/component-base v0.19.6
+ k8s.io/helm => k8s.io/helm v2.13.1+incompatible
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..c7bcfde
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,1695 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/360EntSecGroup-Skylar/excelize v1.4.1/go.mod h1:vnax29X2usfl7HHkBrX5EvSCJcmH3dT9luvxzu8iGAE=
+github.com/Azure/azure-sdk-for-go v31.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v39.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v42.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
+github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
+github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
+github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM=
+github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
+github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U=
+github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
+github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
+github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
+github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=
+github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver/v3 v3.0.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
+github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Masterminds/sprig/v3 v3.0.0/go.mod h1:NEUY/Qq8Gdm2xgYA+NwJM6wmfdRV9xkh8h/Rld20R0U=
+github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
+github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/ahmetb/gen-crd-api-reference-docs v0.1.5 h1:OU+AFpBEhyclrQGx4I6zpCx5WvXiKqvFeeOASOmhKCY=
+github.com/ahmetb/gen-crd-api-reference-docs v0.1.5/go.mod h1:P/XzJ+c2+khJKNKABcm2biRwk2QAuwbLf8DlXuaL7WM=
+github.com/ahmetb/gen-crd-api-reference-docs v0.2.0 h1:YI/cAcRdNAHArfhGKcmCY5qMa32k/UyCZagLgabC5JY=
+github.com/ahmetb/gen-crd-api-reference-docs v0.2.0/go.mod h1:P/XzJ+c2+khJKNKABcm2biRwk2QAuwbLf8DlXuaL7WM=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alessio/shellescape v0.0.0-20190409004728-b115ca0f9053/go.mod h1:xW8sBma2LE3QxFSzCnH9qe6gAE2yO9GvQaWwX89HxbE=
+github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20180828111155-cad214d7d71f/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA=
+github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190603021944-12ad9f921c0b/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
+github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/appscode/jsonpatch v1.0.1/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
+github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
+github.com/aws/aws-sdk-go v1.13.54/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
+github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
+github.com/aws/aws-sdk-go v1.19.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.33.18/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/bugsnag/bugsnag-go v1.5.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
+github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/cloudflare-go v0.11.4/go.mod h1:ZB+hp7VycxPLpp0aiozQQezat46npDXhzHi1DVtRCn4=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190823190603-4a2f61c4f2b4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
+github.com/deislabs/oras v0.7.0/go.mod h1:sqMKPG3tMyIX9xwXUBRLhZ24o+uT4y6jgBD2RzUTKDM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/docker/cli v0.0.0-20190506213505-d88565df0c2d/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c h1:ZfSZ3P3BedhKGUhzj7BQlPSU4OvT6tfOKe3DVHzOA7s=
+github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
+github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
+github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustmop/soup v1.1.2-0.20190516214245-38228baa104e/go.mod h1:CgNC6SGbT+Xb8wGGvzilttZL1mc5sQ/5KkcxsZttMIk=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 h1:pEtiCjIXx3RvGjlUJuCNxNOw0MNblyR9Wi+vJGBFh+8=
+github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
+github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM=
+github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.6+incompatible h1:tfrHha8zJ01ywiOEC1miGY8st1/igzWB8OmvPgoYX7w=
+github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.11.1+incompatible h1:CjKsv3uWcCMvySPQYKxO8XX3f9zD4FeZRsW4G0B4ffE=
+github.com/emicklei/go-restful v2.11.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7-0.20200730005029-803dd64f0468/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
+github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
+github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
+github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/frankban/quicktest v1.5.0 h1:Tb4jWdSpdjKzTUicPnY61PZxKbDoGa7ABbrReT3gQVY=
+github.com/frankban/quicktest v1.5.0/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/gardener/controller-manager-library v0.1.1-0.20191212112146-917449ad760c/go.mod h1:v6cbldxmpL2fYBEB2lSnq3LSEPwIHus9En6iIhwNE1k=
+github.com/gardener/controller-manager-library v0.1.1-0.20200204110458-c263b9bb97ad h1:fX6pN0Xv+1A+2xYmFvG7Qd7W+3bLfd98sRl1TZoRNes=
+github.com/gardener/controller-manager-library v0.1.1-0.20200204110458-c263b9bb97ad/go.mod h1:v6cbldxmpL2fYBEB2lSnq3LSEPwIHus9En6iIhwNE1k=
+github.com/gardener/controller-manager-library v0.2.1-0.20200810091329-d980dbe10959/go.mod h1:XMp1tPcX3SP/dMd+3id418f5Cqu44vydeTkBRbW8EvQ=
+github.com/gardener/etcd-druid v0.1.12/go.mod h1:yZrUQY9clD8/ZXK+MmEq8OS1TaKJeipV0u4kHHrwWeY=
+github.com/gardener/etcd-druid v0.1.15/go.mod h1:BHXG8N04Dl4On7Ie6cErwmpvzncNrmeb+HO7Sqrhf+A=
+github.com/gardener/etcd-druid v0.3.0 h1:rqOR8UPKT9tywPYowEaVAhSfYgz165whJORsijz9Tps=
+github.com/gardener/etcd-druid v0.3.0/go.mod h1:uxZjZ57gIgpX554vGp495g2i8DByoS3OkVtiqsxtbwk=
+github.com/gardener/external-dns-management v0.7.3/go.mod h1:Y3om11E865x4aQ7cmcHjknb8RMgCO153huRb/SvP+9o=
+github.com/gardener/external-dns-management v0.7.7 h1:J0CEkjPqGCvDtHxOCDLAvTa/1I/6GPjjavb6028lMOY=
+github.com/gardener/external-dns-management v0.7.7/go.mod h1:egCe/FPOsUbXA4WV0ne3h7nAD/nLT09hNt/FQQXK+ec=
+github.com/gardener/external-dns-management v0.7.18 h1:15uIyFfZSbR8fivnXvqb1Dvv4QqzfNYxEFUQ9K+mpsE=
+github.com/gardener/external-dns-management v0.7.18/go.mod h1:oHhauLQ3/sop0c1urS6n304Wqv/WM4me0geLn9nTAcY=
+github.com/gardener/gardener v1.1.2/go.mod h1:CP9I0tCDVXTLPkJv/jUtXVUh948kSNKEGUg0haLz9gk=
+github.com/gardener/gardener v1.3.1/go.mod h1:936P5tQbg6ViiW8BVC9ELM95sFrk4DgobKrxMNtn/LU=
+github.com/gardener/gardener v1.4.1-0.20200519155656-a8ccc6cc779a/go.mod h1:t9oESM37bAMIuezi9I0H0I8+++8jy8BUPitcf4ERRXY=
+github.com/gardener/gardener v1.11.3/go.mod h1:5DzqfOm+G8UftKu5zUbYJ+9Cnfd4XrvRNDabkM9AIp4=
+github.com/gardener/gardener v1.12.1-0.20201120162402-fa6dd6fe6393/go.mod h1:13i2DUTf2LH13yVtcPfbY6IZ9vZ1/o6Iu8ajXeR+lS4=
+github.com/gardener/gardener v1.15.1-0.20210115062544-6dc08568692a h1:P6ARGDyf0cGubN5iIJ/OfJSNjn9bV0J3dQQaXwxYJqM=
+github.com/gardener/gardener v1.15.1-0.20210115062544-6dc08568692a/go.mod h1:nEATFWChfH+AD88X70hZ1uI/Wg0uiwScwD0kTc0PFMY=
+github.com/gardener/gardener v1.15.1 h1:XDVULARRDYdN00Ils2PR/ojk41EGNWbomjZ8PLQmcSY=
+github.com/gardener/gardener v1.15.1/go.mod h1:B6mu7i9OusC+k3DFGuiwt99xU7G4IoiwphBRsqr0lC4=
+github.com/gardener/gardener-resource-manager v0.10.0 h1:6OUKoWI3oha42F0oJN8OEo3UR+D3onOCel4Th+zgotU=
+github.com/gardener/gardener-resource-manager v0.10.0/go.mod h1:0pKTHOhvU91eQB0EYr/6Ymd7lXc/5Hi8P8tF/gpV0VQ=
+github.com/gardener/gardener-resource-manager v0.13.1 h1:BiQ0EMO58663UN7lkOXRKV4gt5bEBIk80nxxokAboxc=
+github.com/gardener/gardener-resource-manager v0.13.1/go.mod h1:0No/XttYRUwDn5lSppq9EqlKdo/XJQ44aCZz5BVu3Vw=
+github.com/gardener/gardener-resource-manager v0.18.0 h1:bNB0yKhSqe8DnsvIp3xZr9nsFB4fm+AUAqj1EoIvWU8=
+github.com/gardener/gardener-resource-manager v0.18.0/go.mod h1:k53Yw2iDAIpTxnChQY9qFHrRtuPQWJDNnCP9eE6TnWQ=
+github.com/gardener/hvpa-controller v0.0.0-20191014062307-fad3bdf06a25 h1:nOFITmV7vt4fcYPEXgj66Qs83FdDEMvL/LQcR0diRRE=
+github.com/gardener/hvpa-controller v0.0.0-20191014062307-fad3bdf06a25/go.mod h1:yj7YJ6ijo4adcpXQKutPFZfQuKLdM5UMZZUlpbM3vig=
+github.com/gardener/hvpa-controller v0.2.5 h1:emP1t6hHdFcnvqv698MdBcXz/1JOLZGiAaRHQDSyZBM=
+github.com/gardener/hvpa-controller v0.2.5/go.mod h1:rjsb3BPKJFMluudZ8/bhCCDQfFCF/0Um+rzXQI/MmfI=
+github.com/gardener/hvpa-controller v0.3.1 h1:VsOdcKZMcZDlUNVbFY8oqlKrb1GSCdmzPooKT/Tyi+Y=
+github.com/gardener/hvpa-controller v0.3.1/go.mod h1:rjsb3BPKJFMluudZ8/bhCCDQfFCF/0Um+rzXQI/MmfI=
+github.com/gardener/machine-controller-manager v0.27.0 h1:YSPaX1ILVR8OGRxRWnCFP4tYqNgQh4Wrbvi9hwblbWg=
+github.com/gardener/machine-controller-manager v0.27.0/go.mod h1:zlIxuLQMtRO+aXOFsG6qtYkBmggbWY82K7MSO051ARU=
+github.com/gardener/machine-controller-manager v0.33.0 h1:58Gh4MW7Yv9XoARKhP4wORDcn2Hofbuv/1OlMe9y1eY=
+github.com/gardener/machine-controller-manager v0.33.0/go.mod h1:jxxE+mGgXwg4iPlCHTG4GtUfK2CcHA6yYoIIowoxOZU=
+github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA=
+github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.36.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
+github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs=
+github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
+github.com/go-logr/zapr v0.1.1 h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE=
+github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
+github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4=
+github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU=
+github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
+github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
+github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
+github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
+github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
+github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
+github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE=
+github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
+github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw=
+github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
+github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
+github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
+github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
+github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
+github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
+github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
+github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg=
+github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
+github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
+github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk=
+github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
+github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks=
+github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
+github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
+github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/gobuffalo/envy v1.7.0 h1:GlXgaiBkmrYMHco6t4j7SacKO4XUjvh5pwXh0f4uxXU=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
+github.com/gobuffalo/flect v0.2.0 h1:EWCvMGGxOjsgwlWaP+f4+Hh6yrrte7JeFL2S6b+0hdM=
+github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
+github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs=
+github.com/gobuffalo/logger v1.0.3 h1:YaXOTHNPCvkqqA7w05A4v0k2tCdpr+sgFlgINbQ6gqc=
+github.com/gobuffalo/logger v1.0.3/go.mod h1:SoeejUwldiS7ZsyCBphOGURmWdwUFXs0J7TCjEhjKxM=
+github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q=
+github.com/gobuffalo/packd v1.0.0 h1:6ERZvJHfe24rfFmA9OaoKBdC7+c9sydrytMg8SdFGBM=
+github.com/gobuffalo/packd v1.0.0/go.mod h1:6VTc4htmJRFB7u1m/4LeMTWjFoYrUiBkU9Fdec9hrhI=
+github.com/gobuffalo/packr v1.30.1 h1:hu1fuVR3fXEZR7rXNW3h8rqSML8EVAf6KNm0NKO/wKg=
+github.com/gobuffalo/packr v1.30.1/go.mod h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk=
+github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw=
+github.com/gobuffalo/packr/v2 v2.8.0 h1:IULGd15bQL59ijXLxEvA5wlMxsmx/ZkQv9T282zNVIY=
+github.com/gobuffalo/packr/v2 v2.8.0/go.mod h1:PDk2k3vGevNE3SwVyVRgQCCXETC9SaONCNSXT1Q8M1g=
+github.com/gobuffalo/packr/v2 v2.8.1 h1:tkQpju6i3EtMXJ9uoF5GT6kB+LMTimDWD8Xvbz6zDVA=
+github.com/gobuffalo/packr/v2 v2.8.1/go.mod h1:c/PLlOuTU+p3SybaJATW3H6lX/iK7xEz5OeMf+NnJpg=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4-0.20200731163441-8734ec565a4d h1:izNDUPqGqkeNSYeebPs9caowE15dhr4m59/68323beM=
+github.com/golang/mock v1.4.4-0.20200731163441-8734ec565a4d/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
+github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
+github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
+github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
+github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o=
+github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
+github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
+github.com/golangci/golangci-lint v1.21.0/go.mod h1:phxpHK52q7SE+5KpPnti4oZTdFCEsn/tKN+nFvCKXfk=
+github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU=
+github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
+github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
+github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
+github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
+github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
+github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
+github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
+github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8=
+github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-github/v28 v28.0.0/go.mod h1:+5GboIspo7F0NG2qsvfYh7en6F3EK37uyqv+c35AR3s=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk=
+github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/gophercloud/gophercloud v0.0.0-20190212181753-892256c46858/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
+github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
+github.com/gophercloud/gophercloud v0.6.1-0.20191122030953-d8ac278c1c9d/go.mod h1:ozGNgr9KYOVATV5jsgHl/ceCDXGuguqOZAzoQ/2vcNM=
+github.com/gophercloud/gophercloud v0.7.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss=
+github.com/gophercloud/utils v0.0.0-20190527093828-25f1b77b8c03/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg=
+github.com/gophercloud/utils v0.0.0-20200204043447-9864b6f1f12f/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/handlers v1.4.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gosuri/uitable v0.0.1/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
+github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
+github.com/goware/prefixer v0.0.0-20160118172347-395022866408/go.mod h1:PE1ycukgRPJ7bJ9a1fdfQ9j8i/cEcRAoLZzbxYpNB/s=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.11.3/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-getter v1.4.1/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY=
+github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
+github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
+github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
+github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q=
+github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
+github.com/huandu/xstrings v1.3.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs=
+github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
+github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
+github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
+github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/infobloxopen/infoblox-go-client v1.1.0/go.mod h1:BXiw7S2b9qJoM8MS40vfgCNB2NLHGusk1DtO16BD9zI=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/karrick/godirwalk v1.15.3 h1:0a2pXOgtB16CqIqXTiT7+K9L73f74n/aNQUnH6Ortew=
+github.com/karrick/godirwalk v1.15.3/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/karrick/godirwalk v1.15.8/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
+github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
+github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI=
+github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc=
+github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY=
+github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
+github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mholt/archiver v3.1.1+incompatible h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU=
+github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU=
+github.com/miekg/dns v0.0.0-20181005163659-0d29b283ac0f/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
+github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
+github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mozilla-services/yaml v0.0.0-20191106225358-5c216288813c/go.mod h1:Is/Ucts/yU/mWyGR8yELRoO46mejouKsJfQLAIfTR18=
+github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
+github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
+github.com/nwaples/rardecode v1.1.0 h1:vSxaY8vQhOcVr4mm5e8XllHWTiM4JF507A0Katqw7MQ=
+github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=
+github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34=
+github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
+github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
+github.com/packethost/packngo v0.0.0-20181217122008-b3b45f1b4979/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/paulmach/orb v0.1.3/go.mod h1:VFlX/8C+IQ1p6FTRRKzKoOPJnvEtA5G0Veuqwbu//Vk=
+github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.6.0 h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4=
+github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/phayes/freeport v0.0.0-20171002181615-b8543db493a5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI=
+github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
+github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw=
+github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.14.0 h1:RHRyE8UocrbjU+6UvRzwi6HjiDfxrrBU91TtbKzkGp4=
+github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190129233650-316cf8ccfec5/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI=
+github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/qri-io/starlib v0.4.2-0.20200213133954-ff2e8cd5ef8d/go.mod h1:7DPO4domFU579Ga6E61sB9VFNaniPVwJP5C4bBCu3wA=
+github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
+github.com/rancher/fleet v0.1.0 h1:C+L6ugivK5OUitaMf2hf8qmzoQWokEtd2SFdEAjMEYo=
+github.com/rancher/fleet v0.1.0/go.mod h1:JHup9BY4RM58JhD3zxZIFV2Tl5zHAjmQqpbRRFgS5us=
+github.com/rancher/fleet v0.3.3 h1:tteco3/mFB+3x3AWmnJ8gnKxdTu2jaPxlvZLCWOUbNg=
+github.com/rancher/fleet v0.3.3/go.mod h1:tQYQFNvBw+kvwgvdsqU9dZAfxmCboSCtKaOj4Xr8PYU=
+github.com/rancher/fleet/pkg/apis v0.0.0-20200909045814-3675caaa7070 h1:ABrvFazWllKFAxBJ07DXKhTOjyYYCOfaslsJAK/Nj0c=
+github.com/rancher/fleet/pkg/apis v0.0.0-20200909045814-3675caaa7070/go.mod h1:Taid9n2/wUWWIeLXgP5/zoKhk1f7GDfwjUg22+Ca4GE=
+github.com/rancher/fleet/pkg/apis v0.0.0-20210122160047-1647f9074325 h1:ajNVuzKRXxJPvg14cVQHUCuQiCFm9fNGtEumCS//ndY=
+github.com/rancher/fleet/pkg/apis v0.0.0-20210122160047-1647f9074325/go.mod h1:Taid9n2/wUWWIeLXgP5/zoKhk1f7GDfwjUg22+Ca4GE=
+github.com/rancher/gitjob v0.1.5/go.mod h1:vKRDH/2u4qBcgjeJLuu3wXR+pwk0jKWV7igC/FgZNbc=
+github.com/rancher/lasso v0.0.0-20200820172840-0e4cc0ef5cb0/go.mod h1:OhBBBO1pBwYp0hacWdnvSGOj+XE9yMLOLnaypIlic18=
+github.com/rancher/lasso v0.0.0-20200905045615-7fcb07d6a20b/go.mod h1:OhBBBO1pBwYp0hacWdnvSGOj+XE9yMLOLnaypIlic18=
+github.com/rancher/wrangler v0.6.0/go.mod h1:L4HtjPeX8iqLgsxfJgz+JjKMcX2q3qbRXSeTlC/CSd4=
+github.com/rancher/wrangler v0.6.1 h1:7tyLk/FV2zCQkYg5SEtT4lSlsHNwa5yMOa797/VJhiQ=
+github.com/rancher/wrangler v0.6.1/go.mod h1:L4HtjPeX8iqLgsxfJgz+JjKMcX2q3qbRXSeTlC/CSd4=
+github.com/rancher/wrangler v0.6.2-0.20200829053106-7e1dd4260224 h1:NWYSyS1YiWJOB84xq0FcGDY8xQQwrfKoip2BjMSlu1g=
+github.com/rancher/wrangler v0.6.2-0.20200829053106-7e1dd4260224/go.mod h1:I7qe4DZNMOLKVa9ax7DJdBZ0XtKOppLF/dalhPX3vaE=
+github.com/rancher/wrangler v0.7.3-0.20201002224307-4303c423125a h1:rx2kSz+dGj6mcZ3jIXBhXFKTUU5KCBIKQ3PM8ycwWLo=
+github.com/rancher/wrangler v0.7.3-0.20201002224307-4303c423125a/go.mod h1:goezjesEKwMxHLfltdjg9DW0xWV7txQee6vOuSDqXAI=
+github.com/rancher/wrangler-api v0.6.0/go.mod h1:RbuDkPNHhxcXuwAbLVvEAhH+UPAh+MIkpEd2fcGc0MM=
+github.com/rancher/wrangler-cli v0.0.0-20200815040857-81c48cf8ab43/go.mod h1:KxpGNhk/oVL6LCfyxESTD1sb8eXRlUxtkbNm06+7dZU=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.5.2 h1:qLvObTrvO/XRCqmkKxUlOBc48bI3efyDuAZe25QiF0w=
+github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.7.0 h1:3qqXGV8nn7GJT65debw77Dzrx9sfWYgP0DDo7xcMFRk=
+github.com/rogpeppe/go-internal v1.7.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc=
+github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
+github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v0.0.6 h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs=
+github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
+github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.6.1 h1:VPZzIkznI1YhVMRi6vNFLHSwhnhReBfgTxIPccpfdZk=
+github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
+github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.3-0.20181224173747-660f15d67dbb/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/texttheater/golang-levenshtein v0.0.0-20191208221605-eb6844b05fc6 h1:9VTskZOIRf2vKF3UL8TuWElry5pgUpV1tFSe/e/0m/E=
+github.com/texttheater/golang-levenshtein v0.0.0-20191208221605-eb6844b05fc6/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
+github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8=
+github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
+github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4=
+github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
+github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.3/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
+github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
+github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
+github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
+github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
+github.com/vektra/mockery v1.1.2/go.mod h1:VcfZjKaFOPO+MpN4ZvwPjs4c48lkq1o3Ym8yHZJu0jU=
+github.com/whilp/git-urls v0.0.0-20191001220047-6db9661140c0/go.mod h1:2rx5KE5FLD0HRfkkpyn8JwbVLBdhgeiOb2D2D9LLKM4=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xenolf/lego v0.0.0-20160613233155-a9d8cec0e656/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY=
+github.com/xenolf/lego v0.3.2-0.20160613233155-a9d8cec0e656/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY=
+github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
+github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
+github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yujunz/go-getter v1.4.1-lite/go.mod h1:sbmqxXjyLunH1PkF3n7zSlnVeMvmYUuIl9ZVs/7NyCc=
+github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.6/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8=
+go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mozilla.org/gopgagent v0.0.0-20170926210634-4d7ea76ff71a/go.mod h1:YDKUvO0b//78PaaEro6CAPH6NqohCmL2Cwju5XI2HoE=
+go.mozilla.org/sops/v3 v3.6.1/go.mod h1:3KLncZfyE0cG/28CriTo0JJMARroeKToDIISBgN93xw=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.starlark.net v0.0.0-20190528202925-30ae18b8564f/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg=
+go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM=
+go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E=
+go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
+go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
+golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191028145041-f83a4685e152/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
+golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
+golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587 h1:5Uz0rkjCFu9BC9gCRN7EkwVvhNyQgGWb8KNJrPwBoHY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933 h1:e6HwijUxhDe+hPNjZQQn9bA5PW3vNmnN64U2ZW759Lk=
+golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190911201528-7ad0cfa0b7b5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191028164358-195ce5e7f934/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 h1:ZBzSG/7F4eNKz2L3GE9o300RX0Az1Bw5HF7PDraD+qU=
+golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d h1:62ap6LNOjDU6uGmKXHJbSfciMoV+FeI1sRXx/pLDL44=
+golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210123111255-9b0068b26619 h1:yLLDsUUPDliIQpKl7BjVb1igwngIMH2GBjo1VpwLTE0=
+golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M=
+golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190213192042-740235f6c0d8/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191017205301-920acffc3e65/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191127201027-ecd32218bd7f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371 h1:Cjq6sG3gnKDchzWy7ouGQklhxMtWvh4AhSNJ0qGIeo4=
+golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191230220329-2aa90c603ae3/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200323144430-8dcfad9e016e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200408032209-46bd65c8538f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43 h1:Lcsc5ErIWemp8qAbYffG5vPrqjJ0zk82RTFGifeS1Pc=
+golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8=
+golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5 h1:UaoXseXAWUJUcuJ2E2oczJdLxAJXL0lOmVaBl7kuk+I=
+golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gomodules.xyz/jsonpatch/v2 v2.0.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
+gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0=
+gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
+gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k=
+gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190128161407-8ac453e89fca/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v1 v1.1.2/go.mod h1:QpYS+a4WhS+DTlyQIi6Ka7MS3SuR9a055rgXNEe6EiA=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
+gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+helm.sh/helm/v3 v3.0.0/go.mod h1:sI7B9yfvMgxtTPMWdk1jSKJ2aa59UyP9qhPydqW6mgo=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+istio.io/api v0.0.0-20201123152548-197f11e4ea09/go.mod h1:88HN3o1fSD1jo+Z1WTLlJfMm9biopur6Ct9BFKjiB64=
+istio.io/client-go v1.8.1/go.mod h1:Qymv71lwIqjDTkaE2NqBYLL+Bl5KsCfzEDhntXypHYY=
+istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a/go.mod h1:OzpAts7jljZceG4Vqi5/zXy/pOg1b209T3jb7Nv5wIs=
+k8s.io/api v0.19.6 h1:F3lfwgpKcKms6F1mMqkQXFzXmme8QqHTJBtBkev3TOg=
+k8s.io/api v0.19.6/go.mod h1:Plxx44Nh4zVblkJrIgxVPgPre1mvng6tXf1Sj3bs0fU=
+k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
+k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY=
+k8s.io/apiextensions-apiserver v0.0.0-20191016113550-5357c4baaf65/go.mod h1:5BINdGqggRXXKnDgpwoJ7PyQH8f+Ypp02fvVNcIFy9s=
+k8s.io/apiextensions-apiserver v0.16.4/go.mod h1:HYQwjujEkXmQNhap2C9YDdIVOSskGZ3et0Mvjcyjbto=
+k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8=
+k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs=
+k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
+k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
+k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M=
+k8s.io/apiextensions-apiserver v0.18.8/go.mod h1:7f4ySEkkvifIr4+BRrRWriKKIJjPyg9mb/p63dJKnlM=
+k8s.io/apiextensions-apiserver v0.18.10/go.mod h1:XOE93YaGrb8Pa+ro00Jx3fhzRJ7UB0bU37jRTQXpTOM=
+k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJMz9tA=
+k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg=
+k8s.io/apiextensions-apiserver v0.19.6 h1:LL7H65E2VTBfxmsWQZth60zzWVtbSN2gWMEWfsuDvIQ=
+k8s.io/apiextensions-apiserver v0.19.6/go.mod h1:9s8ceL67UJAD1ewbsn07tkQ7/EGjiKOedKyiUCVXJgQ=
+k8s.io/apimachinery v0.19.6 h1:kBLzSGuDdY1NdSV2uFzI+FwZ9wtkmG+X3ZVcWXSqNgA=
+k8s.io/apimachinery v0.19.6/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q=
+k8s.io/apiserver v0.19.6 h1:nRjEbEyX0diwAZT7GndrCUwX9OQw8v+CCHPZwYLBwK8=
+k8s.io/apiserver v0.19.6/go.mod h1:05XquZxCDzQ27ebk7uV2LrFIK4lm5Yt47XkkUvLAoAM=
+k8s.io/autoscaler v0.0.0-20190805135949-100e91ba756e h1:5AX59ZgftHpbmNupSWosdtW4q/rCnF4s/0J0dEfJkAQ=
+k8s.io/autoscaler v0.0.0-20190805135949-100e91ba756e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA=
+k8s.io/cli-runtime v0.0.0-20191016114015-74ad18325ed5/go.mod h1:sDl6WKSQkDM6zS1u9F49a0VooQ3ycYFBFLqd2jf2Xfo=
+k8s.io/cli-runtime v0.0.0-20191214191754-e6dc6d5c8724/go.mod h1:wzlq80lvjgHW9if6MlE4OIGC86MDKsy5jtl9nxz/IYY=
+k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI=
+k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ=
+k8s.io/cli-runtime v0.18.4/go.mod h1:9/hS/Cuf7NVzWR5F/5tyS6xsnclxoPLVtwhnkJG1Y4g=
+k8s.io/client-go v0.19.6 h1:vtPb33nP8DBMW+/CyuJ8fiie36c3CM1Ts6L4Tsr+PtU=
+k8s.io/client-go v0.19.6/go.mod h1:gEiS+efRlXYUEQ9Oz4lmNXlxAl5JZ8y2zbTDGhvXXnk=
+k8s.io/cluster-bootstrap v0.0.0-20190918163108-da9fdfce26bb/go.mod h1:mQVbtFRxlw/BzBqBaQwIMzjDTST1KrGtzWaR4CGlsTU=
+k8s.io/cluster-bootstrap v0.16.8/go.mod h1:fT1U/qWmXNmIColCsCBg4G881nWFaEqONL0xmP48rkI=
+k8s.io/cluster-bootstrap v0.18.8/go.mod h1:guq0Uc+QwazHgpS1yAw5Z7yUlBCtGppbgWQkbN3lxIY=
+k8s.io/cluster-bootstrap v0.18.10 h1:M1fNZOg+GwCZ6axP3j/FKv0o/tOIjL0uYOWesQUjUoo=
+k8s.io/cluster-bootstrap v0.18.10/go.mod h1:HTuFGIjxRohQP0IrigLxVtxL+ruDVo/ookmfs608WqQ=
+k8s.io/cluster-bootstrap v0.19.6 h1:ZtOoKjWEZP6BEuDd55B3sHTjneutv0z1oh3UfWiKxpc=
+k8s.io/cluster-bootstrap v0.19.6/go.mod h1:9Ft1ED2O3k+4+gtkkth/Y0qHCdi9y+IMI8wh4HszXi4=
+k8s.io/code-generator v0.19.6 h1:N7PlZyX25j5Jl9oIBphWN2qp1AKZOwXdDVfj2Z0V0p8=
+k8s.io/code-generator v0.19.6/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
+k8s.io/component-base v0.19.6 h1:V76d3rIEWvP95peWgRycKslQnEwlaPy4UORvh3+YBbU=
+k8s.io/component-base v0.19.6/go.mod h1:8Btsf8J00/fVDa/YFmXjei7gVkcFrlKZXjSeP4SZNJg=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20190826232639-a874a240740c h1:HH5z+xQGPLMQ2MlS+UVaOaSFgaEqGw1Zb007B9yjZEY=
+k8s.io/gengo v0.0.0-20190826232639-a874a240740c/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac h1:sAvhNk5RRuc6FNYGqe7Ygz3PSo/2wGWbulskmzRX8Vs=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14 h1:t4L10Qfx/p7ASH3gXCdIUtPbbIuegCoUJf3TMSFekjw=
+k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/helm v2.13.1+incompatible h1:qt0LBsHQ7uxCtS3F2r3XI0DNm8ml0xQeSJixUorDyn0=
+k8s.io/helm v2.13.1+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI=
+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/kube-aggregator v0.0.0-20191004104030-d9d5f0cc7532/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU=
+k8s.io/kube-aggregator v0.16.8/go.mod h1:l73g+bVdjrgDz9nrISk6AgupGbv1n+4WjTbGaXz/YvI=
+k8s.io/kube-aggregator v0.18.0/go.mod h1:ateewQ5QbjMZF/dihEFXwaEwoA4v/mayRvzfmvb6eqI=
+k8s.io/kube-aggregator v0.18.8/go.mod h1:CyLoGZB+io8eEwnn+6RbV7QWJQhj8a3TBH8ZM8sLbhI=
+k8s.io/kube-aggregator v0.18.10 h1:dEq7kt0hM0JLZK+NYeUDjmuPRWTBCct+2biXD4aJsE8=
+k8s.io/kube-aggregator v0.18.10/go.mod h1:4hDj1WpnMJTXhMlDHf14zB0B/hrFCY6dBN0ZHQNqiyQ=
+k8s.io/kube-aggregator v0.19.6 h1:huAkb9MZVN56gQ5fXe0eckFF6pbt167tPU6wkIMpiV8=
+k8s.io/kube-aggregator v0.19.6/go.mod h1:BeD33Jp5LLaDH4t9oh1B+LkOY9D5+xhAC8I3ZSvI6m0=
+k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
+k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
+k8s.io/kube-openapi v0.0.0-20190918143330-0270cf2f1c1d/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
+k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
+k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
+k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29 h1:NeQXVJ2XFSkRoPzRo8AId01ZER+j8oV4SZADT4iBOXQ=
+k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
+k8s.io/kube-scheduler v0.19.6/go.mod h1:Z+BYHsgR340yFzs2UJS/H0wSEqrN4mDFvq3VGzhq78U=
+k8s.io/kubectl v0.0.0-20191016120415-2ed914427d51/go.mod h1:gL826ZTIfD4vXTGlmzgTbliCAT9NGiqpCqK2aNYv5MQ=
+k8s.io/kubectl v0.0.0-20191219154910-1528d4eea6dd/go.mod h1:9ehGcuUGjXVZh0qbYSB0vvofQw2JQe6c6cO0k4wu/Oo=
+k8s.io/kubelet v0.16.8/go.mod h1:mzDpnryQg2dlB6V3/WAgb1baIamiICtWpXMFrPOFh6I=
+k8s.io/kubelet v0.18.8/go.mod h1:6z1jHCk0NPE6WshFStfqcgQ1bnD3tetcPmhC2915aio=
+k8s.io/kubelet v0.18.10/go.mod h1:u0wQ2njU/1fcyI/EChi9wAA0WJmgCwA8t6y45CsI+hE=
+k8s.io/kubelet v0.19.6/go.mod h1:/yashsvRBHMGFnxpmTjtaI0sJ4rLJno9zXzc6PPU8Ls=
+k8s.io/metrics v0.0.0-20191004105854-2e8cf7d0888c/go.mod h1:a25VAbm3QT3xiVl1jtoF1ueAKQM149UdZ+L93ePfV3M=
+k8s.io/metrics v0.0.0-20191016113814-3b1a734dba6e/go.mod h1:ve7/vMWeY5lEBkZf6Bt5TTbGS3b8wAxwGbdXAsufjRs=
+k8s.io/metrics v0.0.0-20191214191643-6b1944c9f765/go.mod h1:5V7rewilItwK0cz4nomU0b3XCcees2Ka5EBYWS1HBeM=
+k8s.io/metrics v0.16.8 h1:VHYjoncB4WjvizvQ36vQ2kga4jo7+hLhJIYi60JGru0=
+k8s.io/metrics v0.16.8/go.mod h1:uBIJKJKdga8vL76a1dl+eRlUqOAdCbBpvFHC28SbUIY=
+k8s.io/metrics v0.18.8 h1:Obf262GVd2Uy+WbPkOXNiZroI5mT8zYoKK3Y/8KF7Yc=
+k8s.io/metrics v0.18.8/go.mod h1:j7JzZdiyhLP2BsJm/Fzjs+j5Lb1Y7TySjhPWqBPwRXA=
+k8s.io/metrics v0.18.10 h1:y1+lsuGwHGPRVcBuoZtaULSVvAYo3k6d3S6JNxxBsng=
+k8s.io/metrics v0.18.10/go.mod h1:7FQ0/pv0J15/kp8s8WvaeU6tz6jzAKk+xh1eHGwiaW8=
+k8s.io/metrics v0.19.6 h1:2fBDib9hWUjV8PAoz4CltjXXBUdlUXonJvLgIo8W3mc=
+k8s.io/metrics v0.19.6/go.mod h1:jM61saf/bjMRmow6zan2cAk8vFDmqvbNXFRbB4g7TNs=
+k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE=
+k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20191010214722-8d271d903fe4/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20191218082557-f07c713de883/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20200327001022-6496210b90e8 h1:6JFbaLjRyBz8K2Jvt+pcT+N3vvwMZfg8MfVENwe9aag=
+k8s.io/utils v0.0.0-20200327001022-6496210b90e8/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20200603063816-c1c6865ac451 h1:v8ud2Up6QK1lNOKFgiIVrZdMg7MpmSnvtrOieolJKoE=
+k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19 h1:7Nu2dTj82c6IaWvL7hImJzcXoTPz1MsSCH7r+0m6rfo=
+k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g=
+k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
+mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
+mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/letsencrypt v0.0.1/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY=
+rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0=
+sigs.k8s.io/cli-utils v0.16.0 h1:Wr32m1oxjIqc9G9l+igr13PeIM9LCyq8jQ8KjXKelvg=
+sigs.k8s.io/cli-utils v0.16.0/go.mod h1:9Jqm9K2W6ShhCxsEuaz6HSRKKOXigPUx3ZfypGgxBLY=
+sigs.k8s.io/controller-runtime v0.2.0-beta.5/go.mod h1:HweyYKQ8fBuzdu2bdaeBJvsFgAi/OqBBnrVGXcqKhME=
+sigs.k8s.io/controller-runtime v0.4.0 h1:wATM6/m+3w8lj8FXNaO6Fs/rq/vqoOjO1Q116Z9NPsg=
+sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns=
+sigs.k8s.io/controller-runtime v0.6.3 h1:SBbr+inLPEKhvlJtrvDcwIpm+uhDvp63Bl72xYJtoOE=
+sigs.k8s.io/controller-runtime v0.6.3/go.mod h1:WlZNXcM0++oyaQt4B7C2lEE5JYRs8vJUzRP4N4JpdAY=
+sigs.k8s.io/controller-runtime v0.7.1 h1:nqVwzVzdenfd9xIbB35pC7JJH2IXVL4hDo3MNzkyCh4=
+sigs.k8s.io/controller-runtime v0.7.1/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU=
+sigs.k8s.io/controller-tools v0.2.0-beta.4/go.mod h1:8t/X+FVWvk6TaBcsa+UKUBbn7GMtvyBKX30SGl4em6Y=
+sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA=
+sigs.k8s.io/controller-tools v0.2.9/go.mod h1:ArP7w60JQKkZf7UU2oWTVnEhoNGA+sOMyuSuS+JFNDQ=
+sigs.k8s.io/controller-tools v0.3.0 h1:y3YD99XOyWaXkiF1kd41uRvfp/64teWcrEZFuHxPhJ4=
+sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI=
+sigs.k8s.io/controller-tools v0.4.1 h1:VkuV0MxlRPmRu5iTgBZU4UxUX2LiR99n3sdQGRxZF4w=
+sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU=
+sigs.k8s.io/kind v0.7.0/go.mod h1:An/AbWHT6pA/Lm0Og8j3ukGhfJP3RiVN/IBU6Lo3zl8=
+sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
+sigs.k8s.io/kustomize/api v0.3.3-0.20200328155553-20184e9835c7/go.mod h1:4crE391uVAOFgkmaSgQ9w3CprZ6kwCNoL+Ft8II+C70=
+sigs.k8s.io/kustomize/api v0.6.0/go.mod h1:M7410E0ULUFQlxRskB//n5G0MPwGvs9HG6K8Sf8gw+M=
+sigs.k8s.io/kustomize/kstatus v0.0.2-0.20200328155553-20184e9835c7/go.mod h1:bOsdduDrrGscx4Ij30+TPdBH699eXvAywACCH2zsgeM=
+sigs.k8s.io/kustomize/kyaml v0.1.1/go.mod h1:/NdPPfrperSCGjm55cwEro1loBVtbtVIXSb7FguK6uk=
+sigs.k8s.io/kustomize/kyaml v0.4.0/go.mod h1:XJL84E6sOFeNrQ7CADiemc1B0EjIxHo3OhW4o1aJYNw=
+sigs.k8s.io/kustomize/kyaml v0.7.1/go.mod h1:ne3F9JPhW2wrVaLslxBsEe6MQJQ9YK5rUutrdhBWXwI=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
+sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE=
+sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
+sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM=
+sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w=
+sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
+sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
+vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
diff --git a/hack/api-reference/config.json b/hack/api-reference/config.json
new file mode 100644
index 0000000..174053f
--- /dev/null
+++ b/hack/api-reference/config.json
@@ -0,0 +1,24 @@
+{
+ "hideMemberFields": [
+ "TypeMeta"
+ ],
+ "hideTypePatterns": [
+ "ParseError$",
+ "List$"
+ ],
+ "externalPackages": [
+ {
+ "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/",
+ "docsURLTemplate": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}"
+ },
+ {
+ "typeMatchPrefix": "github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config",
+ "docsURLTemplate": "https://github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config"
+ }
+ ],
+ "typeDisplayNamePrefixOverrides": {
+ "k8s.io/api/": "Kubernetes ",
+ "k8s.io/apimachinery/pkg/apis/": "Kubernetes "
+ },
+ "markdownDisabled": false
+}
diff --git a/hack/api-reference/config.md b/hack/api-reference/config.md
new file mode 100644
index 0000000..393e7fa
--- /dev/null
+++ b/hack/api-reference/config.md
@@ -0,0 +1,97 @@
+Packages:
+
+* shoot-fleet-agent-service.extensions.config.gardener.cloud/v1alpha1
+
+Package v1alpha1 contains the Fleet Agent Service extension configuration API resources.
+
+Resource Types:
+
+* FleetAgentConfig
+
+FleetAgentConfig configuration resource
+
+| Field | Description |
+| --- | --- |
+| `apiVersion` (string) | `shoot-fleet-agent-service.extensions.config.gardener.cloud/v1alpha1` |
+| `kind` (string) | `FleetAgentConfig` |
+| `clientConnection` (k8s.io/component-base/config/v1alpha1.ClientConnectionConfiguration) | (Optional) ClientConnection specifies the kubeconfig file and client connection settings for the proxy server to use when communicating with the apiserver. |
+| `labels` (map[string]string) | Labels to use in the Fleet Cluster registration. |
+| `namespace` (string) | Namespace in which cluster registrations are stored in the Fleet manager's cluster. |
+| `healthCheckConfig` (github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config.HealthCheckConfig) | |
+
+Generated with gen-crd-api-reference-docs
+
diff --git a/hack/component_descriptor b/hack/component_descriptor
new file mode 100755
index 0000000..3c378fa
--- /dev/null
+++ b/hack/component_descriptor
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+# taken from github.com/gardener and modified slightly to accommodate the changed image repository for this repository
+
+set -e
+
+repo_root_dir="$1"
+repo_name="${2:-github.com/gardener/gardener}"
+descriptor_out_file="${COMPONENT_DESCRIPTOR_PATH}"
+
+echo "enriching creating component descriptor from ${BASE_DEFINITION_PATH}"
+
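+# If the repository ships a charts/images.yaml, turn every image that does not
+# originate from this repository into a dependency declaration: well-known
+# Kubernetes images become generic dependencies, the cert-controller-manager
+# image becomes a component dependency, and everything else is recorded as a
+# container-image dependency.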
+if [[ -f "$repo_root_dir/charts/images.yaml" ]]; then
+ images="$(yaml2json < "$repo_root_dir/charts/images.yaml")"
+ eval "$(jq -r ".images |
+ map(select(.sourceRepository != \"$repo_name\") |
+ if (.name == \"hyperkube\" or .name == \"kube-apiserver\" or .name == \"kube-controller-manager\" or .name == \"kube-scheduler\" or .name == \"kube-proxy\" or .repository == \"k8s.gcr.io/hyperkube\") then
+ \"--generic-dependencies '{\\\"name\\\": \\\"\" + .name + \"\\\", \\\"version\\\": \\\"\" + .tag + \"\\\"}'\"
+ elif (.repository | startswith(\"eu.gcr.io/gardener-project/cert-controller-manager\")) then
+ \"--component-dependencies '{\\\"name\\\": \\\"\" + .sourceRepository + \"\\\", \\\"version\\\": \\\"\" + .tag + \"\\\"}'\"
+ else
+ \"--container-image-dependencies '{\\\"name\\\": \\\"\" + .name + \"\\\", \\\"image_reference\\\": \\\"\" + .repository + \":\" + .tag + \"\\\", \\\"version\\\": \\\"\" + .tag + \"\\\"}'\"
+ end) |
+ \"${ADD_DEPENDENCIES_CMD} \\\\\n\" +
+ join(\" \\\\\n\")" <<< "$images")"
+fi
+
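+# Additionally scan the Helm charts for _images.tpl helper templates and
+# declare every image referenced there as a dependency as well.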
+if [[ -d "$repo_root_dir/charts/" ]]; then
+ for image_tpl_path in "$repo_root_dir/charts/"*"/templates/_images.tpl"; do
+ if [[ ! -f "$image_tpl_path" ]]; then
+ continue
+ fi
+
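+    # Flatten each {{- define "image.<name>" -}} block into a single
+    # "<name> <repository>:<tag>" line that the loop below can parse.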
+    outputFile=$(sed -e 's/{{-//' -e 's/}}//' -e 's/define//' -e 's/-//' -e 's/end//' -e 's/"//' -e 's/"//' -e 's/image\.//' -e 's/^[ \t]*//' "$image_tpl_path" | awk -v RS= '{for (i=1; i<=NF; i++) printf "%s%s", $i, (i==NF?"\n":" ")}')
+    echo "enriching component descriptor from ${image_tpl_path}"
+
+ while read p; do
+ line="$(echo -e "$p")"
+ IFS=' ' read -r -a array <<< "$line"
+ IFS=': ' read -r -a imageAndTag <<< ${array[1]}
+
+ NAME=${array[0]}
+ REPOSITORY=${imageAndTag[0]}
+ TAG=${imageAndTag[1]}
+
+ gardener="eu.gcr.io/gardener-project/gardener"
+ if [[ "$NAME" == "hyperkube" ]]; then
+ ${ADD_DEPENDENCIES_CMD} --generic-dependencies "{\"name\": \"$NAME\", \"version\": \"$TAG\"}"
+      elif [[ "$REPOSITORY" == "$gardener"* ]]; then
+ ${ADD_DEPENDENCIES_CMD} --generic-dependencies "{\"name\": \"$NAME\", \"version\": \"$TAG\"}"
+ else
+ ${ADD_DEPENDENCIES_CMD} --container-image-dependencies "{\"name\": \"${NAME}\", \"image_reference\": \"${REPOSITORY}:${TAG}\", \"version\": \"$TAG\"}"
+ fi
+ done < <(echo "$outputFile")
+ done
+fi
+
+cp "${BASE_DEFINITION_PATH}" "${descriptor_out_file}"
diff --git a/hack/tools.go b/hack/tools.go
new file mode 100644
index 0000000..d382cb5
--- /dev/null
+++ b/hack/tools.go
@@ -0,0 +1,32 @@
+// +build tools
+
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This package imports things required by build scripts, to force `go mod` to see them as dependencies
+package tools
+
+import (
+ _ "github.com/gardener/gardener/.github"
+ _ "github.com/gardener/gardener/.github/ISSUE_TEMPLATE"
+ _ "github.com/gardener/gardener/hack"
+ _ "github.com/gardener/gardener/hack/.ci"
+ _ "github.com/gardener/gardener/hack/api-reference/template"
+
+ _ "github.com/ahmetb/gen-crd-api-reference-docs"
+ _ "github.com/gobuffalo/packr/v2/packr2"
+ _ "github.com/golang/mock/mockgen"
+ _ "github.com/onsi/ginkgo/ginkgo"
+ _ "k8s.io/code-generator"
+)
diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh
new file mode 100755
index 0000000..f72fa6b
--- /dev/null
+++ b/hack/update-codegen.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f $GOPATH/bin/*-gen
+
+PROJECT_ROOT=$(dirname $0)/..
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ deepcopy,defaulter \
+ github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/componentconfig \
+ github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis \
+ github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis \
+ "config:v1alpha1" \
+ --go-header-file "${PROJECT_ROOT}/vendor/github.com/gardener/gardener/hack/LICENSE_BOILERPLATE.txt"
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ conversion \
+ github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/componentconfig \
+ github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis \
+ github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis \
+ "config:v1alpha1" \
+ --extra-peer-dirs=github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config,github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config/v1alpha1,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/conversion,k8s.io/apimachinery/pkg/runtime,github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config/v1alpha1 \
+ --go-header-file "${PROJECT_ROOT}/vendor/github.com/gardener/gardener/hack/LICENSE_BOILERPLATE.txt"
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ conversion,client \
+ github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet \
+ github.com/rancher/fleet/pkg/apis \
+ github.com/rancher/fleet/pkg/apis \
+ "fleet.cattle.io:v1alpha1" \
+ --go-header-file "${PROJECT_ROOT}/vendor/github.com/gardener/gardener/hack/LICENSE_BOILERPLATE.txt"
+ #--extra-peer-dirs=github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config,github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config/v1alpha1,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/conversion,k8s.io/apimachinery/pkg/runtime, github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config/v1alpha1
\ No newline at end of file
diff --git a/hack/update-github-templates.sh b/hack/update-github-templates.sh
new file mode 100755
index 0000000..3754d6a
--- /dev/null
+++ b/hack/update-github-templates.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+mkdir -p "$(dirname $0)/../.github" "$(dirname $0)/../.github/ISSUE_TEMPLATE"
+
+for file in `find "$(dirname $0)"/../vendor/github.com/gardener/gardener/.github -name '*.md'`; do
+ cat "$file" |\
+ sed 's/operating Gardener/working with this Gardener extension/g' |\
+ sed 's/to the Gardener project/for this extension/g' |\
+ sed 's/to Gardener/to this extension/g' |\
+ sed 's/- Gardener version:/- Gardener version (if relevant):\n- Extension version:/g' \
+ > "$(dirname $0)/../.github/${file#*.github/}"
+done
\ No newline at end of file
diff --git a/pkg/apis/config/doc.go b/pkg/apis/config/doc.go
new file mode 100644
index 0000000..83c4cc7
--- /dev/null
+++ b/pkg/apis/config/doc.go
@@ -0,0 +1,5 @@
+// +k8s:deepcopy-gen=package
+// +groupName="shoot-fleet-agent-service.extensions.config.gardener.cloud"
+
+//go:generate ../../../hack/update-codegen.sh
+package config
diff --git a/pkg/apis/config/install/install.go b/pkg/apis/config/install/install.go
new file mode 100644
index 0000000..7b40803
--- /dev/null
+++ b/pkg/apis/config/install/install.go
@@ -0,0 +1,31 @@
+package install
+
+import (
+ "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config"
+ v1alpha1 "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config/v1alpha1"
+ v1alpha1fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(
+ v1alpha1.AddToScheme,
+ v1alpha1fleet.AddToScheme,
+ config.AddToScheme,
+ setVersionPriority,
+ )
+
+ // AddToScheme adds all APIs to the scheme.
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+func setVersionPriority(scheme *runtime.Scheme) error {
+ return scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion)
+}
+
+// Install installs all APIs in the scheme.
+func Install(scheme *runtime.Scheme) {
+ utilruntime.Must(AddToScheme(scheme))
+}
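The install package above only wires schemes together; nothing in this patch calls it directly. A minimal sketch, assuming a caller merely needs a runtime.Scheme that knows the internal config API, its v1alpha1 version, and the Fleet v1alpha1 types:

package main

import (
	"fmt"

	"github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config/install"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Build a scheme with the internal config API, its v1alpha1 version and
	// the Fleet v1alpha1 types registered; v1alpha1 is the preferred version.
	scheme := runtime.NewScheme()
	install.Install(scheme)
	fmt.Printf("scheme knows %d types\n", len(scheme.AllKnownTypes()))
}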
diff --git a/pkg/apis/config/loader/loader.go b/pkg/apis/config/loader/loader.go
new file mode 100644
index 0000000..18ef1d9
--- /dev/null
+++ b/pkg/apis/config/loader/loader.go
@@ -0,0 +1,58 @@
+package loader
+
+import (
+ "io/ioutil"
+
+ "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config"
+ "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config/install"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer/json"
+ "k8s.io/apimachinery/pkg/runtime/serializer/versioning"
+)
+
+var (
+ Codec runtime.Codec
+ Scheme *runtime.Scheme
+)
+
+func init() {
+ Scheme = runtime.NewScheme()
+ install.Install(Scheme)
+ yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme)
+ Codec = versioning.NewDefaultingCodecForScheme(
+ Scheme,
+ yamlSerializer,
+ yamlSerializer,
+ schema.GroupVersion{Version: "v1alpha1"},
+ runtime.InternalGroupVersioner,
+ )
+}
+
+// LoadFromFile takes a filename and de-serializes the contents into a FleetAgentConfig object.
+func LoadFromFile(filename string) (*config.FleetAgentConfig, error) {
+ bytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ return Load(bytes)
+}
+
+// Load takes a byte slice and de-serializes the contents into a FleetAgentConfig object.
+// Encapsulates de-serialization without assuming the source is a file.
+func Load(data []byte) (*config.FleetAgentConfig, error) {
+ cfg := &config.FleetAgentConfig{}
+
+ if len(data) == 0 {
+ return cfg, nil
+ }
+
+ decoded, _, err := Codec.Decode(data, &schema.GroupVersionKind{Version: "v1alpha1", Kind: "Config"}, cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ return decoded.(*config.FleetAgentConfig), nil
+}
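A hedged usage sketch for the loader: the file path is a placeholder, and the fields printed are the ones defined in pkg/apis/config below.

package main

import (
	"fmt"
	"log"

	"github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config/loader"
)

func main() {
	// Placeholder path to a serialized v1alpha1 FleetAgentConfig.
	cfg, err := loader.LoadFromFile("/etc/shoot-fleet-agent/config.yaml")
	if err != nil {
		log.Fatalf("loading configuration: %v", err)
	}
	// The codec set up in init() has already defaulted the object and
	// converted it to the internal FleetAgentConfig representation.
	fmt.Printf("registering clusters in namespace %q with labels %v\n", cfg.Namespace, cfg.Labels)
}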
diff --git a/pkg/apis/config/register.go b/pkg/apis/config/register.go
new file mode 100644
index 0000000..36d324b
--- /dev/null
+++ b/pkg/apis/config/register.go
@@ -0,0 +1,37 @@
+package config
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "shoot-fleet-agent-service.extensions.config.gardener.cloud"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // schemeBuilder used to register the FleetAgentConfig resource.
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // AddToScheme is a pointer to schemeBuilder.AddToScheme.
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &FleetAgentConfig{},
+ )
+ return nil
+}
diff --git a/pkg/apis/config/types.go b/pkg/apis/config/types.go
new file mode 100644
index 0000000..6102f45
--- /dev/null
+++ b/pkg/apis/config/types.go
@@ -0,0 +1,26 @@
+package config
+
+import (
+ healthcheckconfig "github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ componentbaseconfig "k8s.io/component-base/config"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// FleetAgentConfig configuration resource
+type FleetAgentConfig struct {
+ metav1.TypeMeta
+
+ // ClientConnection specifies the kubeconfig file and client connection
+ // settings for the proxy server to use when communicating with the apiserver.
+ ClientConnection *componentbaseconfig.ClientConnectionConfiguration
+
+ // Labels to use in Fleet Cluster registration
+ Labels map[string]string
+
+ // Namespace to store cluster registrations in the Fleet manager's cluster
+ Namespace string
+
+ HealthCheckConfig *healthcheckconfig.HealthCheckConfig
+}
diff --git a/pkg/apis/config/v1alpha1/defaults.go b/pkg/apis/config/v1alpha1/defaults.go
new file mode 100644
index 0000000..81e0862
--- /dev/null
+++ b/pkg/apis/config/v1alpha1/defaults.go
@@ -0,0 +1,9 @@
+package v1alpha1
+
+import (
+"k8s.io/apimachinery/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+ return RegisterDefaults(scheme)
+}
diff --git a/pkg/apis/config/v1alpha1/doc.go b/pkg/apis/config/v1alpha1/doc.go
new file mode 100644
index 0000000..fb27e0b
--- /dev/null
+++ b/pkg/apis/config/v1alpha1/doc.go
@@ -0,0 +1,10 @@
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+//go:generate gen-crd-api-reference-docs -api-dir . -config ../../../../hack/api-reference/config.json -template-dir ../../../../vendor/github.com/gardener/gardener/hack/api-reference/template -out-file ../../../../hack/api-reference/config.md
+
+// Package v1alpha1 contains the shoot-fleet-agent extension configuration API resources.
+// +groupName=shoot-fleet-agent-service.extensions.config.gardener.cloud
+package v1alpha1
diff --git a/pkg/apis/config/v1alpha1/register.go b/pkg/apis/config/v1alpha1/register.go
new file mode 100644
index 0000000..8c08466
--- /dev/null
+++ b/pkg/apis/config/v1alpha1/register.go
@@ -0,0 +1,40 @@
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "shoot-fleet-agent-service.extensions.config.gardener.cloud"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder used to register the FleetAgentConfig resource.
+ SchemeBuilder runtime.SchemeBuilder
+ localSchemeBuilder = &SchemeBuilder
+ // AddToScheme is a pointer to SchemeBuilder.AddToScheme.
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ localSchemeBuilder.Register(addDefaultingFuncs, addKnownTypes)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &FleetAgentConfig{},
+ )
+ return nil
+}
\ No newline at end of file
diff --git a/pkg/apis/config/v1alpha1/types.go b/pkg/apis/config/v1alpha1/types.go
new file mode 100644
index 0000000..388a097
--- /dev/null
+++ b/pkg/apis/config/v1alpha1/types.go
@@ -0,0 +1,28 @@
+package v1alpha1
+
+import (
+ healthcheckconfig "github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// FleetAgentConfig configuration resource
+type FleetAgentConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // ClientConnection specifies the kubeconfig file and client connection
+ // settings for the proxy server to use when communicating with the apiserver.
+ // +optional
+ ClientConnection *componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection,omitempty"`
+
+ // Labels to use in Fleet Cluster registration
+ Labels map[string]string `json:"labels,omitempty"`
+
+ // Namespace to store cluster registrations in the Fleet manager's cluster
+ Namespace string `json:"namespace,omitempty"`
+
+ HealthCheckConfig *healthcheckconfig.HealthCheckConfig `json:"healthCheckConfig,omitempty"`
+}
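For orientation, a small sketch that serializes a v1alpha1 FleetAgentConfig with illustrative values; the APIVersion matches the group registered in register.go, and plain JSON is used only to keep the example dependency-free:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Illustrative values only; in practice the configuration is handed to
	// the extension and decoded via pkg/apis/config/loader.
	cfg := v1alpha1.FleetAgentConfig{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "shoot-fleet-agent-service.extensions.config.gardener.cloud/v1alpha1",
			Kind:       "FleetAgentConfig",
		},
		Labels:    map[string]string{"env": "dev"},
		Namespace: "clusters",
	}
	out, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out))
}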
diff --git a/pkg/apis/config/v1alpha1/zz_generated.conversion.go b/pkg/apis/config/v1alpha1/zz_generated.conversion.go
new file mode 100644
index 0000000..2b61c0c
--- /dev/null
+++ b/pkg/apis/config/v1alpha1/zz_generated.conversion.go
@@ -0,0 +1,78 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ unsafe "unsafe"
+
+ healthcheckconfig "github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config"
+ config "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ componentbaseconfig "k8s.io/component-base/config"
+ configv1alpha1 "k8s.io/component-base/config/v1alpha1"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*FleetAgentConfig)(nil), (*config.FleetAgentConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_FleetAgentConfig_To_config_FleetAgentConfig(a.(*FleetAgentConfig), b.(*config.FleetAgentConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*config.FleetAgentConfig)(nil), (*FleetAgentConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_config_FleetAgentConfig_To_v1alpha1_FleetAgentConfig(a.(*config.FleetAgentConfig), b.(*FleetAgentConfig), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1alpha1_FleetAgentConfig_To_config_FleetAgentConfig(in *FleetAgentConfig, out *config.FleetAgentConfig, s conversion.Scope) error {
+ out.ClientConnection = (*componentbaseconfig.ClientConnectionConfiguration)(unsafe.Pointer(in.ClientConnection))
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ out.Namespace = in.Namespace
+ out.HealthCheckConfig = (*healthcheckconfig.HealthCheckConfig)(unsafe.Pointer(in.HealthCheckConfig))
+ return nil
+}
+
+// Convert_v1alpha1_FleetAgentConfig_To_config_FleetAgentConfig is an autogenerated conversion function.
+func Convert_v1alpha1_FleetAgentConfig_To_config_FleetAgentConfig(in *FleetAgentConfig, out *config.FleetAgentConfig, s conversion.Scope) error {
+ return autoConvert_v1alpha1_FleetAgentConfig_To_config_FleetAgentConfig(in, out, s)
+}
+
+func autoConvert_config_FleetAgentConfig_To_v1alpha1_FleetAgentConfig(in *config.FleetAgentConfig, out *FleetAgentConfig, s conversion.Scope) error {
+ out.ClientConnection = (*configv1alpha1.ClientConnectionConfiguration)(unsafe.Pointer(in.ClientConnection))
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ out.Namespace = in.Namespace
+ out.HealthCheckConfig = (*healthcheckconfig.HealthCheckConfig)(unsafe.Pointer(in.HealthCheckConfig))
+ return nil
+}
+
+// Convert_config_FleetAgentConfig_To_v1alpha1_FleetAgentConfig is an autogenerated conversion function.
+func Convert_config_FleetAgentConfig_To_v1alpha1_FleetAgentConfig(in *config.FleetAgentConfig, out *FleetAgentConfig, s conversion.Scope) error {
+ return autoConvert_config_FleetAgentConfig_To_v1alpha1_FleetAgentConfig(in, out, s)
+}
diff --git a/pkg/apis/config/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/config/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..f8eb515
--- /dev/null
+++ b/pkg/apis/config/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,69 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ config "github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ configv1alpha1 "k8s.io/component-base/config/v1alpha1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FleetAgentConfig) DeepCopyInto(out *FleetAgentConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.ClientConnection != nil {
+ in, out := &in.ClientConnection, &out.ClientConnection
+ *out = new(configv1alpha1.ClientConnectionConfiguration)
+ **out = **in
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.HealthCheckConfig != nil {
+ in, out := &in.HealthCheckConfig, &out.HealthCheckConfig
+ *out = new(config.HealthCheckConfig)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetAgentConfig.
+func (in *FleetAgentConfig) DeepCopy() *FleetAgentConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(FleetAgentConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *FleetAgentConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/pkg/apis/config/v1alpha1/zz_generated.defaults.go b/pkg/apis/config/v1alpha1/zz_generated.defaults.go
new file mode 100644
index 0000000..95e01b6
--- /dev/null
+++ b/pkg/apis/config/v1alpha1/zz_generated.defaults.go
@@ -0,0 +1,32 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ return nil
+}
diff --git a/pkg/apis/config/zz_generated.deepcopy.go b/pkg/apis/config/zz_generated.deepcopy.go
new file mode 100644
index 0000000..d24cfe0
--- /dev/null
+++ b/pkg/apis/config/zz_generated.deepcopy.go
@@ -0,0 +1,69 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package config
+
+import (
+ healthcheckconfig "github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ componentbaseconfig "k8s.io/component-base/config"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FleetAgentConfig) DeepCopyInto(out *FleetAgentConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.ClientConnection != nil {
+ in, out := &in.ClientConnection, &out.ClientConnection
+ *out = new(componentbaseconfig.ClientConnectionConfiguration)
+ **out = **in
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.HealthCheckConfig != nil {
+ in, out := &in.HealthCheckConfig, &out.HealthCheckConfig
+ *out = new(healthcheckconfig.HealthCheckConfig)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetAgentConfig.
+func (in *FleetAgentConfig) DeepCopy() *FleetAgentConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(FleetAgentConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *FleetAgentConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/pkg/client/fleet/clientset/versioned/clientset.go b/pkg/client/fleet/clientset/versioned/clientset.go
new file mode 100644
index 0000000..750c5fc
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/clientset.go
@@ -0,0 +1,97 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package versioned
+
+import (
+ "fmt"
+
+ fleetv1alpha1 "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1"
+ discovery "k8s.io/client-go/discovery"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
+)
+
+type Interface interface {
+ Discovery() discovery.DiscoveryInterface
+ FleetV1alpha1() fleetv1alpha1.FleetV1alpha1Interface
+}
+
+// Clientset contains the clients for groups. Each group has exactly one
+// version included in a Clientset.
+type Clientset struct {
+ *discovery.DiscoveryClient
+ fleetV1alpha1 *fleetv1alpha1.FleetV1alpha1Client
+}
+
+// FleetV1alpha1 retrieves the FleetV1alpha1Client
+func (c *Clientset) FleetV1alpha1() fleetv1alpha1.FleetV1alpha1Interface {
+ return c.fleetV1alpha1
+}
+
+// Discovery retrieves the DiscoveryClient
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ if c == nil {
+ return nil
+ }
+ return c.DiscoveryClient
+}
+
+// NewForConfig creates a new Clientset for the given config.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfig will generate a rate-limiter in configShallowCopy.
+func NewForConfig(c *rest.Config) (*Clientset, error) {
+ configShallowCopy := *c
+ if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+ if configShallowCopy.Burst <= 0 {
+ return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
+ }
+ configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
+ }
+ var cs Clientset
+ var err error
+ cs.fleetV1alpha1, err = fleetv1alpha1.NewForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+
+ cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+ return &cs, nil
+}
+
+// NewForConfigOrDie creates a new Clientset for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *Clientset {
+ var cs Clientset
+ cs.fleetV1alpha1 = fleetv1alpha1.NewForConfigOrDie(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
+ return &cs
+}
+
+// New creates a new Clientset for the given RESTClient.
+func New(c rest.Interface) *Clientset {
+ var cs Clientset
+ cs.fleetV1alpha1 = fleetv1alpha1.New(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
+ return &cs
+}
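A hedged sketch of using the generated clientset against a Fleet manager cluster; the kubeconfig path and namespace are placeholders, not values from this patch:

package main

import (
	"context"
	"fmt"
	"log"

	fleet "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig pointing at the Fleet manager cluster.
	restCfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/fleet-manager.kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	cs, err := fleet.NewForConfig(restCfg)
	if err != nil {
		log.Fatal(err)
	}
	// List Bundles in a placeholder namespace through the typed client.
	bundles, err := cs.FleetV1alpha1().Bundles("fleet-default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range bundles.Items {
		fmt.Println(b.Namespace + "/" + b.Name)
	}
}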
diff --git a/pkg/client/fleet/clientset/versioned/doc.go b/pkg/client/fleet/clientset/versioned/doc.go
new file mode 100644
index 0000000..32d8522
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated clientset.
+package versioned
diff --git a/pkg/client/fleet/clientset/versioned/fake/clientset_generated.go b/pkg/client/fleet/clientset/versioned/fake/clientset_generated.go
new file mode 100644
index 0000000..1a2b495
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/fake/clientset_generated.go
@@ -0,0 +1,82 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ clientset "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned"
+ fleetv1alpha1 "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1"
+ fakefleetv1alpha1 "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/discovery"
+ fakediscovery "k8s.io/client-go/discovery/fake"
+ "k8s.io/client-go/testing"
+)
+
+// NewSimpleClientset returns a clientset that will respond with the provided objects.
+// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
+// without applying any validations and/or defaults. It shouldn't be considered a replacement
+// for a real clientset and is mostly useful in simple unit tests.
+func NewSimpleClientset(objects ...runtime.Object) *Clientset {
+ o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
+ for _, obj := range objects {
+ if err := o.Add(obj); err != nil {
+ panic(err)
+ }
+ }
+
+ cs := &Clientset{tracker: o}
+ cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
+ cs.AddReactor("*", "*", testing.ObjectReaction(o))
+ cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
+ gvr := action.GetResource()
+ ns := action.GetNamespace()
+ watch, err := o.Watch(gvr, ns)
+ if err != nil {
+ return false, nil, err
+ }
+ return true, watch, nil
+ })
+
+ return cs
+}
+
+// Clientset implements clientset.Interface. Meant to be embedded into a
+// struct to get a default implementation. This makes faking out just the method
+// you want to test easier.
+type Clientset struct {
+ testing.Fake
+ discovery *fakediscovery.FakeDiscovery
+ tracker testing.ObjectTracker
+}
+
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ return c.discovery
+}
+
+func (c *Clientset) Tracker() testing.ObjectTracker {
+ return c.tracker
+}
+
+var _ clientset.Interface = &Clientset{}
+
+// FleetV1alpha1 retrieves the FleetV1alpha1Client
+func (c *Clientset) FleetV1alpha1() fleetv1alpha1.FleetV1alpha1Interface {
+ return &fakefleetv1alpha1.FakeFleetV1alpha1{Fake: &c.Fake}
+}
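A minimal test sketch using the fake clientset; the Bundle name and namespace are illustrative:

package fleetclient_test

import (
	"context"
	"testing"

	"github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/fake"
	fleetv1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestListBundles(t *testing.T) {
	// Seed the object tracker with one pre-existing Bundle.
	seed := &fleetv1alpha1.Bundle{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "fleet-default"},
	}
	cs := fake.NewSimpleClientset(seed)

	bundles, err := cs.FleetV1alpha1().Bundles("fleet-default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(bundles.Items) != 1 {
		t.Fatalf("expected 1 Bundle, got %d", len(bundles.Items))
	}
}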
diff --git a/pkg/client/fleet/clientset/versioned/fake/doc.go b/pkg/client/fleet/clientset/versioned/fake/doc.go
new file mode 100644
index 0000000..bf5cb47
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated fake clientset.
+package fake
diff --git a/pkg/client/fleet/clientset/versioned/fake/register.go b/pkg/client/fleet/clientset/versioned/fake/register.go
new file mode 100644
index 0000000..a374cfb
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/fake/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ fleetv1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var scheme = runtime.NewScheme()
+var codecs = serializer.NewCodecFactory(scheme)
+
+var localSchemeBuilder = runtime.SchemeBuilder{
+ fleetv1alpha1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(scheme))
+}
diff --git a/pkg/client/fleet/clientset/versioned/scheme/doc.go b/pkg/client/fleet/clientset/versioned/scheme/doc.go
new file mode 100644
index 0000000..7d4fb77
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/scheme/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/pkg/client/fleet/clientset/versioned/scheme/register.go b/pkg/client/fleet/clientset/versioned/scheme/register.go
new file mode 100644
index 0000000..bbe1600
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/scheme/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ fleetv1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ fleetv1alpha1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/bundle.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/bundle.go
new file mode 100644
index 0000000..aaeb7e8
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/bundle.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ scheme "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/scheme"
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// BundlesGetter has a method to return a BundleInterface.
+// A group's client should implement this interface.
+type BundlesGetter interface {
+ Bundles(namespace string) BundleInterface
+}
+
+// BundleInterface has methods to work with Bundle resources.
+type BundleInterface interface {
+ Create(ctx context.Context, bundle *v1alpha1.Bundle, opts v1.CreateOptions) (*v1alpha1.Bundle, error)
+ Update(ctx context.Context, bundle *v1alpha1.Bundle, opts v1.UpdateOptions) (*v1alpha1.Bundle, error)
+ UpdateStatus(ctx context.Context, bundle *v1alpha1.Bundle, opts v1.UpdateOptions) (*v1alpha1.Bundle, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Bundle, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.BundleList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Bundle, err error)
+ BundleExpansion
+}
+
+// bundles implements BundleInterface
+type bundles struct {
+ client rest.Interface
+ ns string
+}
+
+// newBundles returns a Bundles
+func newBundles(c *FleetV1alpha1Client, namespace string) *bundles {
+ return &bundles{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the bundle, and returns the corresponding bundle object, and an error if there is any.
+func (c *bundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Bundle, err error) {
+ result = &v1alpha1.Bundle{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("bundles").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Bundles that match those selectors.
+func (c *bundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BundleList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.BundleList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("bundles").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested bundles.
+func (c *bundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("bundles").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a bundle and creates it. Returns the server's representation of the bundle, and an error, if there is any.
+func (c *bundles) Create(ctx context.Context, bundle *v1alpha1.Bundle, opts v1.CreateOptions) (result *v1alpha1.Bundle, err error) {
+ result = &v1alpha1.Bundle{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("bundles").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(bundle).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a bundle and updates it. Returns the server's representation of the bundle, and an error, if there is any.
+func (c *bundles) Update(ctx context.Context, bundle *v1alpha1.Bundle, opts v1.UpdateOptions) (result *v1alpha1.Bundle, err error) {
+ result = &v1alpha1.Bundle{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("bundles").
+ Name(bundle.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(bundle).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *bundles) UpdateStatus(ctx context.Context, bundle *v1alpha1.Bundle, opts v1.UpdateOptions) (result *v1alpha1.Bundle, err error) {
+ result = &v1alpha1.Bundle{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("bundles").
+ Name(bundle.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(bundle).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the bundle and deletes it. Returns an error if one occurs.
+func (c *bundles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("bundles").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *bundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("bundles").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched bundle.
+func (c *bundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Bundle, err error) {
+ result = &v1alpha1.Bundle{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("bundles").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
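The same pattern applies to the other generated typed clients that follow. As one hedged sketch, a hypothetical helper that creates an (empty) Bundle through this interface; a real Bundle would carry rendered manifests in its Spec:

package example

import (
	"context"

	fleetv1alpha1client "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1"
	fleetv1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// createEmptyBundle is a hypothetical helper, not part of this patch.
func createEmptyBundle(ctx context.Context, c fleetv1alpha1client.FleetV1alpha1Interface, namespace, name string) (*fleetv1alpha1.Bundle, error) {
	bundle := &fleetv1alpha1.Bundle{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
	}
	return c.Bundles(namespace).Create(ctx, bundle, metav1.CreateOptions{})
}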
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/bundledeployment.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/bundledeployment.go
new file mode 100644
index 0000000..bb9e03b
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/bundledeployment.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ scheme "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/scheme"
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// BundleDeploymentsGetter has a method to return a BundleDeploymentInterface.
+// A group's client should implement this interface.
+type BundleDeploymentsGetter interface {
+ BundleDeployments(namespace string) BundleDeploymentInterface
+}
+
+// BundleDeploymentInterface has methods to work with BundleDeployment resources.
+type BundleDeploymentInterface interface {
+ Create(ctx context.Context, bundleDeployment *v1alpha1.BundleDeployment, opts v1.CreateOptions) (*v1alpha1.BundleDeployment, error)
+ Update(ctx context.Context, bundleDeployment *v1alpha1.BundleDeployment, opts v1.UpdateOptions) (*v1alpha1.BundleDeployment, error)
+ UpdateStatus(ctx context.Context, bundleDeployment *v1alpha1.BundleDeployment, opts v1.UpdateOptions) (*v1alpha1.BundleDeployment, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.BundleDeployment, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.BundleDeploymentList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BundleDeployment, err error)
+ BundleDeploymentExpansion
+}
+
+// bundleDeployments implements BundleDeploymentInterface
+type bundleDeployments struct {
+ client rest.Interface
+ ns string
+}
+
+// newBundleDeployments returns a BundleDeployments
+func newBundleDeployments(c *FleetV1alpha1Client, namespace string) *bundleDeployments {
+ return &bundleDeployments{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the bundleDeployment, and returns the corresponding bundleDeployment object, and an error if there is any.
+func (c *bundleDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BundleDeployment, err error) {
+ result = &v1alpha1.BundleDeployment{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("bundledeployments").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of BundleDeployments that match those selectors.
+func (c *bundleDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BundleDeploymentList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.BundleDeploymentList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("bundledeployments").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested bundleDeployments.
+func (c *bundleDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("bundledeployments").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a bundleDeployment and creates it. Returns the server's representation of the bundleDeployment, and an error, if there is any.
+func (c *bundleDeployments) Create(ctx context.Context, bundleDeployment *v1alpha1.BundleDeployment, opts v1.CreateOptions) (result *v1alpha1.BundleDeployment, err error) {
+ result = &v1alpha1.BundleDeployment{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("bundledeployments").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(bundleDeployment).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a bundleDeployment and updates it. Returns the server's representation of the bundleDeployment, and an error, if there is any.
+func (c *bundleDeployments) Update(ctx context.Context, bundleDeployment *v1alpha1.BundleDeployment, opts v1.UpdateOptions) (result *v1alpha1.BundleDeployment, err error) {
+ result = &v1alpha1.BundleDeployment{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("bundledeployments").
+ Name(bundleDeployment.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(bundleDeployment).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *bundleDeployments) UpdateStatus(ctx context.Context, bundleDeployment *v1alpha1.BundleDeployment, opts v1.UpdateOptions) (result *v1alpha1.BundleDeployment, err error) {
+ result = &v1alpha1.BundleDeployment{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("bundledeployments").
+ Name(bundleDeployment.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(bundleDeployment).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the bundleDeployment and deletes it. Returns an error if one occurs.
+func (c *bundleDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("bundledeployments").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *bundleDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("bundledeployments").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched bundleDeployment.
+func (c *bundleDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BundleDeployment, err error) {
+ result = &v1alpha1.BundleDeployment{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("bundledeployments").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/bundlenamespacemapping.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/bundlenamespacemapping.go
new file mode 100644
index 0000000..5c33369
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/bundlenamespacemapping.go
@@ -0,0 +1,178 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ scheme "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/scheme"
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// BundleNamespaceMappingsGetter has a method to return a BundleNamespaceMappingInterface.
+// A group's client should implement this interface.
+type BundleNamespaceMappingsGetter interface {
+ BundleNamespaceMappings(namespace string) BundleNamespaceMappingInterface
+}
+
+// BundleNamespaceMappingInterface has methods to work with BundleNamespaceMapping resources.
+type BundleNamespaceMappingInterface interface {
+ Create(ctx context.Context, bundleNamespaceMapping *v1alpha1.BundleNamespaceMapping, opts v1.CreateOptions) (*v1alpha1.BundleNamespaceMapping, error)
+ Update(ctx context.Context, bundleNamespaceMapping *v1alpha1.BundleNamespaceMapping, opts v1.UpdateOptions) (*v1alpha1.BundleNamespaceMapping, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.BundleNamespaceMapping, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.BundleNamespaceMappingList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BundleNamespaceMapping, err error)
+ BundleNamespaceMappingExpansion
+}
+
+// bundleNamespaceMappings implements BundleNamespaceMappingInterface
+type bundleNamespaceMappings struct {
+ client rest.Interface
+ ns string
+}
+
+// newBundleNamespaceMappings returns a BundleNamespaceMappings
+func newBundleNamespaceMappings(c *FleetV1alpha1Client, namespace string) *bundleNamespaceMappings {
+ return &bundleNamespaceMappings{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the bundleNamespaceMapping, and returns the corresponding bundleNamespaceMapping object, and an error if there is any.
+func (c *bundleNamespaceMappings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BundleNamespaceMapping, err error) {
+ result = &v1alpha1.BundleNamespaceMapping{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("bundlenamespacemappings").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of BundleNamespaceMappings that match those selectors.
+func (c *bundleNamespaceMappings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BundleNamespaceMappingList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.BundleNamespaceMappingList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("bundlenamespacemappings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested bundleNamespaceMappings.
+func (c *bundleNamespaceMappings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("bundlenamespacemappings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a bundleNamespaceMapping and creates it. Returns the server's representation of the bundleNamespaceMapping, and an error, if there is any.
+func (c *bundleNamespaceMappings) Create(ctx context.Context, bundleNamespaceMapping *v1alpha1.BundleNamespaceMapping, opts v1.CreateOptions) (result *v1alpha1.BundleNamespaceMapping, err error) {
+ result = &v1alpha1.BundleNamespaceMapping{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("bundlenamespacemappings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(bundleNamespaceMapping).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a bundleNamespaceMapping and updates it. Returns the server's representation of the bundleNamespaceMapping, and an error, if there is any.
+func (c *bundleNamespaceMappings) Update(ctx context.Context, bundleNamespaceMapping *v1alpha1.BundleNamespaceMapping, opts v1.UpdateOptions) (result *v1alpha1.BundleNamespaceMapping, err error) {
+ result = &v1alpha1.BundleNamespaceMapping{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("bundlenamespacemappings").
+ Name(bundleNamespaceMapping.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(bundleNamespaceMapping).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the bundleNamespaceMapping and deletes it. Returns an error if one occurs.
+func (c *bundleNamespaceMappings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("bundlenamespacemappings").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *bundleNamespaceMappings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("bundlenamespacemappings").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched bundleNamespaceMapping.
+func (c *bundleNamespaceMappings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BundleNamespaceMapping, err error) {
+ result = &v1alpha1.BundleNamespaceMapping{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("bundlenamespacemappings").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/cluster.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/cluster.go
new file mode 100644
index 0000000..cfb45f9
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/cluster.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ scheme "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/scheme"
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ClustersGetter has a method to return a ClusterInterface.
+// A group's client should implement this interface.
+type ClustersGetter interface {
+ Clusters(namespace string) ClusterInterface
+}
+
+// ClusterInterface has methods to work with Cluster resources.
+type ClusterInterface interface {
+ Create(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.CreateOptions) (*v1alpha1.Cluster, error)
+ Update(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.UpdateOptions) (*v1alpha1.Cluster, error)
+ UpdateStatus(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.UpdateOptions) (*v1alpha1.Cluster, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Cluster, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Cluster, err error)
+ ClusterExpansion
+}
+
+// clusters implements ClusterInterface
+type clusters struct {
+ client rest.Interface
+ ns string
+}
+
+// newClusters returns a Clusters
+func newClusters(c *FleetV1alpha1Client, namespace string) *clusters {
+ return &clusters{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any.
+func (c *clusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Cluster, err error) {
+ result = &v1alpha1.Cluster{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("clusters").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Clusters that match those selectors.
+func (c *clusters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ClusterList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("clusters").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested clusters.
+func (c *clusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("clusters").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any.
+func (c *clusters) Create(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.CreateOptions) (result *v1alpha1.Cluster, err error) {
+ result = &v1alpha1.Cluster{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("clusters").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(cluster).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any.
+func (c *clusters) Update(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.UpdateOptions) (result *v1alpha1.Cluster, err error) {
+ result = &v1alpha1.Cluster{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("clusters").
+ Name(cluster.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(cluster).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *clusters) UpdateStatus(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.UpdateOptions) (result *v1alpha1.Cluster, err error) {
+ result = &v1alpha1.Cluster{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("clusters").
+ Name(cluster.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(cluster).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the cluster and deletes it. Returns an error if one occurs.
+func (c *clusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("clusters").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("clusters").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched cluster.
+func (c *clusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Cluster, err error) {
+ result = &v1alpha1.Cluster{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("clusters").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
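
To show how the typed Cluster client above is consumed, here is a minimal sketch that is not part of this patch. It assumes the standard client-gen clientset layout (a NewForConfig constructor and a FleetV1alpha1() accessor in the generated clientset.go, not shown in this excerpt); the kubeconfig path and namespace are placeholders:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	fleetclient "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned"
)

func main() {
	// Load a rest.Config from a kubeconfig; the path is a placeholder.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// NewForConfig and FleetV1alpha1() follow the conventional client-gen
	// clientset layout (assumed here, defined outside this excerpt).
	client, err := fleetclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List Fleet Clusters in a placeholder namespace.
	clusters, err := client.FleetV1alpha1().Clusters("fleet-default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, c := range clusters.Items {
		fmt.Println("fleet cluster:", c.Name)
	}
}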
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/clustergroup.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/clustergroup.go
new file mode 100644
index 0000000..bd43095
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/clustergroup.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ scheme "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/scheme"
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ClusterGroupsGetter has a method to return a ClusterGroupInterface.
+// A group's client should implement this interface.
+type ClusterGroupsGetter interface {
+ ClusterGroups(namespace string) ClusterGroupInterface
+}
+
+// ClusterGroupInterface has methods to work with ClusterGroup resources.
+type ClusterGroupInterface interface {
+ Create(ctx context.Context, clusterGroup *v1alpha1.ClusterGroup, opts v1.CreateOptions) (*v1alpha1.ClusterGroup, error)
+ Update(ctx context.Context, clusterGroup *v1alpha1.ClusterGroup, opts v1.UpdateOptions) (*v1alpha1.ClusterGroup, error)
+ UpdateStatus(ctx context.Context, clusterGroup *v1alpha1.ClusterGroup, opts v1.UpdateOptions) (*v1alpha1.ClusterGroup, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterGroup, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterGroupList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterGroup, err error)
+ ClusterGroupExpansion
+}
+
+// clusterGroups implements ClusterGroupInterface
+type clusterGroups struct {
+ client rest.Interface
+ ns string
+}
+
+// newClusterGroups returns a ClusterGroups
+func newClusterGroups(c *FleetV1alpha1Client, namespace string) *clusterGroups {
+ return &clusterGroups{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the clusterGroup, and returns the corresponding clusterGroup object, and an error if there is any.
+func (c *clusterGroups) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterGroup, err error) {
+ result = &v1alpha1.ClusterGroup{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("clustergroups").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ClusterGroups that match those selectors.
+func (c *clusterGroups) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterGroupList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ClusterGroupList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("clustergroups").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterGroups.
+func (c *clusterGroups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("clustergroups").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a clusterGroup and creates it. Returns the server's representation of the clusterGroup, and an error, if there is any.
+func (c *clusterGroups) Create(ctx context.Context, clusterGroup *v1alpha1.ClusterGroup, opts v1.CreateOptions) (result *v1alpha1.ClusterGroup, err error) {
+ result = &v1alpha1.ClusterGroup{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("clustergroups").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterGroup).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a clusterGroup and updates it. Returns the server's representation of the clusterGroup, and an error, if there is any.
+func (c *clusterGroups) Update(ctx context.Context, clusterGroup *v1alpha1.ClusterGroup, opts v1.UpdateOptions) (result *v1alpha1.ClusterGroup, err error) {
+ result = &v1alpha1.ClusterGroup{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("clustergroups").
+ Name(clusterGroup.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterGroup).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *clusterGroups) UpdateStatus(ctx context.Context, clusterGroup *v1alpha1.ClusterGroup, opts v1.UpdateOptions) (result *v1alpha1.ClusterGroup, err error) {
+ result = &v1alpha1.ClusterGroup{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("clustergroups").
+ Name(clusterGroup.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterGroup).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the clusterGroup and deletes it. Returns an error if one occurs.
+func (c *clusterGroups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("clustergroups").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusterGroups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("clustergroups").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched clusterGroup.
+func (c *clusterGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterGroup, err error) {
+ result = &v1alpha1.ClusterGroup{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("clustergroups").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/clusterregistration.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/clusterregistration.go
new file mode 100644
index 0000000..132006c
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/clusterregistration.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ scheme "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/scheme"
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ClusterRegistrationsGetter has a method to return a ClusterRegistrationInterface.
+// A group's client should implement this interface.
+type ClusterRegistrationsGetter interface {
+ ClusterRegistrations(namespace string) ClusterRegistrationInterface
+}
+
+// ClusterRegistrationInterface has methods to work with ClusterRegistration resources.
+type ClusterRegistrationInterface interface {
+ Create(ctx context.Context, clusterRegistration *v1alpha1.ClusterRegistration, opts v1.CreateOptions) (*v1alpha1.ClusterRegistration, error)
+ Update(ctx context.Context, clusterRegistration *v1alpha1.ClusterRegistration, opts v1.UpdateOptions) (*v1alpha1.ClusterRegistration, error)
+ UpdateStatus(ctx context.Context, clusterRegistration *v1alpha1.ClusterRegistration, opts v1.UpdateOptions) (*v1alpha1.ClusterRegistration, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRegistration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRegistrationList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRegistration, err error)
+ ClusterRegistrationExpansion
+}
+
+// clusterRegistrations implements ClusterRegistrationInterface
+type clusterRegistrations struct {
+ client rest.Interface
+ ns string
+}
+
+// newClusterRegistrations returns a ClusterRegistrations
+func newClusterRegistrations(c *FleetV1alpha1Client, namespace string) *clusterRegistrations {
+ return &clusterRegistrations{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the clusterRegistration, and returns the corresponding clusterRegistration object, and an error if there is any.
+func (c *clusterRegistrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRegistration, err error) {
+ result = &v1alpha1.ClusterRegistration{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("clusterregistrations").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ClusterRegistrations that match those selectors.
+func (c *clusterRegistrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRegistrationList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ClusterRegistrationList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("clusterregistrations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRegistrations.
+func (c *clusterRegistrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("clusterregistrations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a clusterRegistration and creates it. Returns the server's representation of the clusterRegistration, and an error, if there is any.
+func (c *clusterRegistrations) Create(ctx context.Context, clusterRegistration *v1alpha1.ClusterRegistration, opts v1.CreateOptions) (result *v1alpha1.ClusterRegistration, err error) {
+ result = &v1alpha1.ClusterRegistration{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("clusterregistrations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterRegistration).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a clusterRegistration and updates it. Returns the server's representation of the clusterRegistration, and an error, if there is any.
+func (c *clusterRegistrations) Update(ctx context.Context, clusterRegistration *v1alpha1.ClusterRegistration, opts v1.UpdateOptions) (result *v1alpha1.ClusterRegistration, err error) {
+ result = &v1alpha1.ClusterRegistration{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("clusterregistrations").
+ Name(clusterRegistration.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterRegistration).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *clusterRegistrations) UpdateStatus(ctx context.Context, clusterRegistration *v1alpha1.ClusterRegistration, opts v1.UpdateOptions) (result *v1alpha1.ClusterRegistration, err error) {
+ result = &v1alpha1.ClusterRegistration{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("clusterregistrations").
+ Name(clusterRegistration.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterRegistration).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the clusterRegistration and deletes it. Returns an error if one occurs.
+func (c *clusterRegistrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("clusterregistrations").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusterRegistrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("clusterregistrations").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched clusterRegistration.
+func (c *clusterRegistrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRegistration, err error) {
+ result = &v1alpha1.ClusterRegistration{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("clusterregistrations").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/clusterregistrationtoken.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/clusterregistrationtoken.go
new file mode 100644
index 0000000..b47db3c
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/clusterregistrationtoken.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ scheme "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/scheme"
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ClusterRegistrationTokensGetter has a method to return a ClusterRegistrationTokenInterface.
+// A group's client should implement this interface.
+type ClusterRegistrationTokensGetter interface {
+ ClusterRegistrationTokens(namespace string) ClusterRegistrationTokenInterface
+}
+
+// ClusterRegistrationTokenInterface has methods to work with ClusterRegistrationToken resources.
+type ClusterRegistrationTokenInterface interface {
+ Create(ctx context.Context, clusterRegistrationToken *v1alpha1.ClusterRegistrationToken, opts v1.CreateOptions) (*v1alpha1.ClusterRegistrationToken, error)
+ Update(ctx context.Context, clusterRegistrationToken *v1alpha1.ClusterRegistrationToken, opts v1.UpdateOptions) (*v1alpha1.ClusterRegistrationToken, error)
+ UpdateStatus(ctx context.Context, clusterRegistrationToken *v1alpha1.ClusterRegistrationToken, opts v1.UpdateOptions) (*v1alpha1.ClusterRegistrationToken, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRegistrationToken, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRegistrationTokenList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRegistrationToken, err error)
+ ClusterRegistrationTokenExpansion
+}
+
+// clusterRegistrationTokens implements ClusterRegistrationTokenInterface
+type clusterRegistrationTokens struct {
+ client rest.Interface
+ ns string
+}
+
+// newClusterRegistrationTokens returns a ClusterRegistrationTokens
+func newClusterRegistrationTokens(c *FleetV1alpha1Client, namespace string) *clusterRegistrationTokens {
+ return &clusterRegistrationTokens{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the clusterRegistrationToken, and returns the corresponding clusterRegistrationToken object, and an error if there is any.
+func (c *clusterRegistrationTokens) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRegistrationToken, err error) {
+ result = &v1alpha1.ClusterRegistrationToken{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("clusterregistrationtokens").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ClusterRegistrationTokens that match those selectors.
+func (c *clusterRegistrationTokens) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRegistrationTokenList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ClusterRegistrationTokenList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("clusterregistrationtokens").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRegistrationTokens.
+func (c *clusterRegistrationTokens) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("clusterregistrationtokens").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a clusterRegistrationToken and creates it. Returns the server's representation of the clusterRegistrationToken, and an error, if there is any.
+func (c *clusterRegistrationTokens) Create(ctx context.Context, clusterRegistrationToken *v1alpha1.ClusterRegistrationToken, opts v1.CreateOptions) (result *v1alpha1.ClusterRegistrationToken, err error) {
+ result = &v1alpha1.ClusterRegistrationToken{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("clusterregistrationtokens").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterRegistrationToken).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a clusterRegistrationToken and updates it. Returns the server's representation of the clusterRegistrationToken, and an error, if there is any.
+func (c *clusterRegistrationTokens) Update(ctx context.Context, clusterRegistrationToken *v1alpha1.ClusterRegistrationToken, opts v1.UpdateOptions) (result *v1alpha1.ClusterRegistrationToken, err error) {
+ result = &v1alpha1.ClusterRegistrationToken{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("clusterregistrationtokens").
+ Name(clusterRegistrationToken.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterRegistrationToken).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *clusterRegistrationTokens) UpdateStatus(ctx context.Context, clusterRegistrationToken *v1alpha1.ClusterRegistrationToken, opts v1.UpdateOptions) (result *v1alpha1.ClusterRegistrationToken, err error) {
+ result = &v1alpha1.ClusterRegistrationToken{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("clusterregistrationtokens").
+ Name(clusterRegistrationToken.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterRegistrationToken).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the clusterRegistrationToken and deletes it. Returns an error if one occurs.
+func (c *clusterRegistrationTokens) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("clusterregistrationtokens").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusterRegistrationTokens) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("clusterregistrationtokens").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched clusterRegistrationToken.
+func (c *clusterRegistrationTokens) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRegistrationToken, err error) {
+ result = &v1alpha1.ClusterRegistrationToken{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("clusterregistrationtokens").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/content.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/content.go
new file mode 100644
index 0000000..7a6ab3a
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/content.go
@@ -0,0 +1,168 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ scheme "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/scheme"
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ContentsGetter has a method to return a ContentInterface.
+// A group's client should implement this interface.
+type ContentsGetter interface {
+ Contents() ContentInterface
+}
+
+// ContentInterface has methods to work with Content resources.
+type ContentInterface interface {
+ Create(ctx context.Context, content *v1alpha1.Content, opts v1.CreateOptions) (*v1alpha1.Content, error)
+ Update(ctx context.Context, content *v1alpha1.Content, opts v1.UpdateOptions) (*v1alpha1.Content, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Content, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ContentList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Content, err error)
+ ContentExpansion
+}
+
+// contents implements ContentInterface
+type contents struct {
+ client rest.Interface
+}
+
+// newContents returns a Contents
+func newContents(c *FleetV1alpha1Client) *contents {
+ return &contents{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the content, and returns the corresponding content object, and an error if there is any.
+func (c *contents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Content, err error) {
+ result = &v1alpha1.Content{}
+ err = c.client.Get().
+ Resource("contents").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Contents that match those selectors.
+func (c *contents) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ContentList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ContentList{}
+ err = c.client.Get().
+ Resource("contents").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested contents.
+func (c *contents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("contents").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a content and creates it. Returns the server's representation of the content, and an error, if there is any.
+func (c *contents) Create(ctx context.Context, content *v1alpha1.Content, opts v1.CreateOptions) (result *v1alpha1.Content, err error) {
+ result = &v1alpha1.Content{}
+ err = c.client.Post().
+ Resource("contents").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(content).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a content and updates it. Returns the server's representation of the content, and an error, if there is any.
+func (c *contents) Update(ctx context.Context, content *v1alpha1.Content, opts v1.UpdateOptions) (result *v1alpha1.Content, err error) {
+ result = &v1alpha1.Content{}
+ err = c.client.Put().
+ Resource("contents").
+ Name(content.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(content).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the content and deletes it. Returns an error if one occurs.
+func (c *contents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("contents").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *contents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("contents").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched content.
+func (c *contents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Content, err error) {
+ result = &v1alpha1.Content{}
+ err = c.client.Patch(pt).
+ Resource("contents").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
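
Unlike the namespaced clients above, the Content client generated here is cluster-scoped: Contents() takes no namespace argument and the request builder omits Namespace(c.ns). A small sketch of that difference, again assuming the conventional FleetV1alpha1Interface group interface (not shown in this excerpt):

package fleetclientexamples

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	fleetv1alpha1client "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1"
)

// listContents lists the cluster-scoped Content objects; note the missing
// namespace argument compared to namespaced getters such as Clusters(ns).
func listContents(ctx context.Context, client fleetv1alpha1client.FleetV1alpha1Interface) error {
	contents, err := client.Contents().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, item := range contents.Items {
		fmt.Println("content:", item.Name)
	}
	return nil
}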
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/doc.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/doc.go
new file mode 100644
index 0000000..828c8eb
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/doc.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/doc.go
new file mode 100644
index 0000000..c6d504e
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_bundle.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_bundle.go
new file mode 100644
index 0000000..42a9c03
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_bundle.go
@@ -0,0 +1,142 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeBundles implements BundleInterface
+type FakeBundles struct {
+ Fake *FakeFleetV1alpha1
+ ns string
+}
+
+var bundlesResource = schema.GroupVersionResource{Group: "fleet.cattle.io", Version: "v1alpha1", Resource: "bundles"}
+
+var bundlesKind = schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "Bundle"}
+
+// Get takes name of the bundle, and returns the corresponding bundle object, and an error if there is any.
+func (c *FakeBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Bundle, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(bundlesResource, c.ns, name), &v1alpha1.Bundle{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Bundle), err
+}
+
+// List takes label and field selectors, and returns the list of Bundles that match those selectors.
+func (c *FakeBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BundleList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(bundlesResource, bundlesKind, c.ns, opts), &v1alpha1.BundleList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.BundleList{ListMeta: obj.(*v1alpha1.BundleList).ListMeta}
+ for _, item := range obj.(*v1alpha1.BundleList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested bundles.
+func (c *FakeBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(bundlesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a bundle and creates it. Returns the server's representation of the bundle, and an error, if there is any.
+func (c *FakeBundles) Create(ctx context.Context, bundle *v1alpha1.Bundle, opts v1.CreateOptions) (result *v1alpha1.Bundle, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(bundlesResource, c.ns, bundle), &v1alpha1.Bundle{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Bundle), err
+}
+
+// Update takes the representation of a bundle and updates it. Returns the server's representation of the bundle, and an error, if there is any.
+func (c *FakeBundles) Update(ctx context.Context, bundle *v1alpha1.Bundle, opts v1.UpdateOptions) (result *v1alpha1.Bundle, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(bundlesResource, c.ns, bundle), &v1alpha1.Bundle{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Bundle), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeBundles) UpdateStatus(ctx context.Context, bundle *v1alpha1.Bundle, opts v1.UpdateOptions) (*v1alpha1.Bundle, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(bundlesResource, "status", c.ns, bundle), &v1alpha1.Bundle{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Bundle), err
+}
+
+// Delete takes name of the bundle and deletes it. Returns an error if one occurs.
+func (c *FakeBundles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(bundlesResource, c.ns, name), &v1alpha1.Bundle{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(bundlesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.BundleList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched bundle.
+func (c *FakeBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Bundle, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(bundlesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Bundle{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Bundle), err
+}
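
The fake clients added in this package back the generated fake clientset, which is handy in unit tests. A minimal test sketch, assuming the conventional NewSimpleClientset constructor and FleetV1alpha1() accessor generated into the versioned/fake package (that file is not part of this excerpt); the bundle name and namespace are placeholders:

package fleetclientexamples

import (
	"context"
	"testing"

	fleetv1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	fleetfake "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/fake"
)

// TestFakeBundleGet seeds the fake clientset with one Bundle and reads it
// back through the typed FakeBundles client defined above.
func TestFakeBundleGet(t *testing.T) {
	client := fleetfake.NewSimpleClientset(&fleetv1alpha1.Bundle{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "fleet-default"},
	})

	bundle, err := client.FleetV1alpha1().Bundles("fleet-default").Get(context.TODO(), "demo", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if bundle.Name != "demo" {
		t.Fatalf("expected bundle %q, got %q", "demo", bundle.Name)
	}
}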
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_bundledeployment.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_bundledeployment.go
new file mode 100644
index 0000000..97472ff
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_bundledeployment.go
@@ -0,0 +1,142 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeBundleDeployments implements BundleDeploymentInterface
+type FakeBundleDeployments struct {
+ Fake *FakeFleetV1alpha1
+ ns string
+}
+
+var bundledeploymentsResource = schema.GroupVersionResource{Group: "fleet.cattle.io", Version: "v1alpha1", Resource: "bundledeployments"}
+
+var bundledeploymentsKind = schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "BundleDeployment"}
+
+// Get takes name of the bundleDeployment, and returns the corresponding bundleDeployment object, and an error if there is any.
+func (c *FakeBundleDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BundleDeployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(bundledeploymentsResource, c.ns, name), &v1alpha1.BundleDeployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.BundleDeployment), err
+}
+
+// List takes label and field selectors, and returns the list of BundleDeployments that match those selectors.
+func (c *FakeBundleDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BundleDeploymentList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(bundledeploymentsResource, bundledeploymentsKind, c.ns, opts), &v1alpha1.BundleDeploymentList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.BundleDeploymentList{ListMeta: obj.(*v1alpha1.BundleDeploymentList).ListMeta}
+ for _, item := range obj.(*v1alpha1.BundleDeploymentList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested bundleDeployments.
+func (c *FakeBundleDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(bundledeploymentsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a bundleDeployment and creates it. Returns the server's representation of the bundleDeployment, and an error, if there is any.
+func (c *FakeBundleDeployments) Create(ctx context.Context, bundleDeployment *v1alpha1.BundleDeployment, opts v1.CreateOptions) (result *v1alpha1.BundleDeployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(bundledeploymentsResource, c.ns, bundleDeployment), &v1alpha1.BundleDeployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.BundleDeployment), err
+}
+
+// Update takes the representation of a bundleDeployment and updates it. Returns the server's representation of the bundleDeployment, and an error, if there is any.
+func (c *FakeBundleDeployments) Update(ctx context.Context, bundleDeployment *v1alpha1.BundleDeployment, opts v1.UpdateOptions) (result *v1alpha1.BundleDeployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(bundledeploymentsResource, c.ns, bundleDeployment), &v1alpha1.BundleDeployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.BundleDeployment), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeBundleDeployments) UpdateStatus(ctx context.Context, bundleDeployment *v1alpha1.BundleDeployment, opts v1.UpdateOptions) (*v1alpha1.BundleDeployment, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(bundledeploymentsResource, "status", c.ns, bundleDeployment), &v1alpha1.BundleDeployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.BundleDeployment), err
+}
+
+// Delete takes name of the bundleDeployment and deletes it. Returns an error if one occurs.
+func (c *FakeBundleDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(bundledeploymentsResource, c.ns, name), &v1alpha1.BundleDeployment{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeBundleDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(bundledeploymentsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.BundleDeploymentList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched bundleDeployment.
+func (c *FakeBundleDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BundleDeployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(bundledeploymentsResource, c.ns, name, pt, data, subresources...), &v1alpha1.BundleDeployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.BundleDeployment), err
+}
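
The List method above filters the tracked objects by label selector on the client side; the field selector returned by ExtractFromListOptions is discarded. A hedged sketch of that behaviour, under the same NewSimpleClientset assumption as the earlier Bundle example:

package fake_test

import (
	"context"
	"testing"

	fleetv1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	fleetfake "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/fake"
)

func TestBundleDeploymentListFiltersByLabel(t *testing.T) {
	cs := fleetfake.NewSimpleClientset(
		&fleetv1alpha1.BundleDeployment{ObjectMeta: metav1.ObjectMeta{
			Name: "bd-a", Namespace: "cluster-ns", Labels: map[string]string{"app": "a"}}},
		&fleetv1alpha1.BundleDeployment{ObjectMeta: metav1.ObjectMeta{
			Name: "bd-b", Namespace: "cluster-ns", Labels: map[string]string{"app": "b"}}},
	)

	// Only bd-a matches; the fake filters by label in memory after listing.
	list, err := cs.FleetV1alpha1().BundleDeployments("cluster-ns").
		List(context.TODO(), metav1.ListOptions{LabelSelector: "app=a"})
	if err != nil {
		t.Fatal(err)
	}
	if len(list.Items) != 1 || list.Items[0].Name != "bd-a" {
		t.Fatalf("unexpected items: %+v", list.Items)
	}
}
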
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_bundlenamespacemapping.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_bundlenamespacemapping.go
new file mode 100644
index 0000000..1de5d65
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_bundlenamespacemapping.go
@@ -0,0 +1,130 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeBundleNamespaceMappings implements BundleNamespaceMappingInterface
+type FakeBundleNamespaceMappings struct {
+ Fake *FakeFleetV1alpha1
+ ns string
+}
+
+var bundlenamespacemappingsResource = schema.GroupVersionResource{Group: "fleet.cattle.io", Version: "v1alpha1", Resource: "bundlenamespacemappings"}
+
+var bundlenamespacemappingsKind = schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "BundleNamespaceMapping"}
+
+// Get takes name of the bundleNamespaceMapping, and returns the corresponding bundleNamespaceMapping object, and an error if there is any.
+func (c *FakeBundleNamespaceMappings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BundleNamespaceMapping, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(bundlenamespacemappingsResource, c.ns, name), &v1alpha1.BundleNamespaceMapping{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.BundleNamespaceMapping), err
+}
+
+// List takes label and field selectors, and returns the list of BundleNamespaceMappings that match those selectors.
+func (c *FakeBundleNamespaceMappings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BundleNamespaceMappingList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(bundlenamespacemappingsResource, bundlenamespacemappingsKind, c.ns, opts), &v1alpha1.BundleNamespaceMappingList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.BundleNamespaceMappingList{ListMeta: obj.(*v1alpha1.BundleNamespaceMappingList).ListMeta}
+ for _, item := range obj.(*v1alpha1.BundleNamespaceMappingList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested bundleNamespaceMappings.
+func (c *FakeBundleNamespaceMappings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(bundlenamespacemappingsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a bundleNamespaceMapping and creates it. Returns the server's representation of the bundleNamespaceMapping, and an error, if there is any.
+func (c *FakeBundleNamespaceMappings) Create(ctx context.Context, bundleNamespaceMapping *v1alpha1.BundleNamespaceMapping, opts v1.CreateOptions) (result *v1alpha1.BundleNamespaceMapping, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(bundlenamespacemappingsResource, c.ns, bundleNamespaceMapping), &v1alpha1.BundleNamespaceMapping{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.BundleNamespaceMapping), err
+}
+
+// Update takes the representation of a bundleNamespaceMapping and updates it. Returns the server's representation of the bundleNamespaceMapping, and an error, if there is any.
+func (c *FakeBundleNamespaceMappings) Update(ctx context.Context, bundleNamespaceMapping *v1alpha1.BundleNamespaceMapping, opts v1.UpdateOptions) (result *v1alpha1.BundleNamespaceMapping, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(bundlenamespacemappingsResource, c.ns, bundleNamespaceMapping), &v1alpha1.BundleNamespaceMapping{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.BundleNamespaceMapping), err
+}
+
+// Delete takes name of the bundleNamespaceMapping and deletes it. Returns an error if one occurs.
+func (c *FakeBundleNamespaceMappings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(bundlenamespacemappingsResource, c.ns, name), &v1alpha1.BundleNamespaceMapping{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeBundleNamespaceMappings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(bundlenamespacemappingsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.BundleNamespaceMappingList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched bundleNamespaceMapping.
+func (c *FakeBundleNamespaceMappings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BundleNamespaceMapping, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(bundlenamespacemappingsResource, c.ns, name, pt, data, subresources...), &v1alpha1.BundleNamespaceMapping{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.BundleNamespaceMapping), err
+}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_cluster.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_cluster.go
new file mode 100644
index 0000000..44ddf91
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_cluster.go
@@ -0,0 +1,142 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeClusters implements ClusterInterface
+type FakeClusters struct {
+ Fake *FakeFleetV1alpha1
+ ns string
+}
+
+var clustersResource = schema.GroupVersionResource{Group: "fleet.cattle.io", Version: "v1alpha1", Resource: "clusters"}
+
+var clustersKind = schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "Cluster"}
+
+// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any.
+func (c *FakeClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Cluster, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(clustersResource, c.ns, name), &v1alpha1.Cluster{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Cluster), err
+}
+
+// List takes label and field selectors, and returns the list of Clusters that match those selectors.
+func (c *FakeClusters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(clustersResource, clustersKind, c.ns, opts), &v1alpha1.ClusterList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.ClusterList{ListMeta: obj.(*v1alpha1.ClusterList).ListMeta}
+ for _, item := range obj.(*v1alpha1.ClusterList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested clusters.
+func (c *FakeClusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(clustersResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any.
+func (c *FakeClusters) Create(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.CreateOptions) (result *v1alpha1.Cluster, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(clustersResource, c.ns, cluster), &v1alpha1.Cluster{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Cluster), err
+}
+
+// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any.
+func (c *FakeClusters) Update(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.UpdateOptions) (result *v1alpha1.Cluster, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(clustersResource, c.ns, cluster), &v1alpha1.Cluster{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Cluster), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeClusters) UpdateStatus(ctx context.Context, cluster *v1alpha1.Cluster, opts v1.UpdateOptions) (*v1alpha1.Cluster, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(clustersResource, "status", c.ns, cluster), &v1alpha1.Cluster{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Cluster), err
+}
+
+// Delete takes name of the cluster and deletes it. Returns an error if one occurs.
+func (c *FakeClusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(clustersResource, c.ns, name), &v1alpha1.Cluster{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeClusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(clustersResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.ClusterList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched cluster.
+func (c *FakeClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Cluster, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(clustersResource, c.ns, name, pt, data, subresources...), &v1alpha1.Cluster{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Cluster), err
+}
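
Because every method above funnels through c.Fake.Invokes, a test can override the default object-tracker behaviour by prepending a reactor; prepended reactors are consulted first. A small sketch under the same assumptions as the earlier examples:

package fake_test

import (
	"context"
	"errors"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	k8stesting "k8s.io/client-go/testing"

	fleetfake "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/fake"
)

func TestClusterGetFailureInjection(t *testing.T) {
	cs := fleetfake.NewSimpleClientset()

	// Force every "get clusters" call to fail; this reactor runs before the
	// default object-tracker reaction.
	cs.PrependReactor("get", "clusters",
		func(action k8stesting.Action) (bool, runtime.Object, error) {
			return true, nil, errors.New("boom")
		})

	_, err := cs.FleetV1alpha1().Clusters("fleet-default").
		Get(context.TODO(), "shoot-a", metav1.GetOptions{})
	if err == nil || err.Error() != "boom" {
		t.Fatalf("expected injected error, got %v", err)
	}
}
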
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_clustergroup.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_clustergroup.go
new file mode 100644
index 0000000..565a0bd
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_clustergroup.go
@@ -0,0 +1,142 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeClusterGroups implements ClusterGroupInterface
+type FakeClusterGroups struct {
+ Fake *FakeFleetV1alpha1
+ ns string
+}
+
+var clustergroupsResource = schema.GroupVersionResource{Group: "fleet.cattle.io", Version: "v1alpha1", Resource: "clustergroups"}
+
+var clustergroupsKind = schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "ClusterGroup"}
+
+// Get takes name of the clusterGroup, and returns the corresponding clusterGroup object, and an error if there is any.
+func (c *FakeClusterGroups) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterGroup, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(clustergroupsResource, c.ns, name), &v1alpha1.ClusterGroup{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterGroup), err
+}
+
+// List takes label and field selectors, and returns the list of ClusterGroups that match those selectors.
+func (c *FakeClusterGroups) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterGroupList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(clustergroupsResource, clustergroupsKind, c.ns, opts), &v1alpha1.ClusterGroupList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.ClusterGroupList{ListMeta: obj.(*v1alpha1.ClusterGroupList).ListMeta}
+ for _, item := range obj.(*v1alpha1.ClusterGroupList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested clusterGroups.
+func (c *FakeClusterGroups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(clustergroupsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a clusterGroup and creates it. Returns the server's representation of the clusterGroup, and an error, if there is any.
+func (c *FakeClusterGroups) Create(ctx context.Context, clusterGroup *v1alpha1.ClusterGroup, opts v1.CreateOptions) (result *v1alpha1.ClusterGroup, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(clustergroupsResource, c.ns, clusterGroup), &v1alpha1.ClusterGroup{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterGroup), err
+}
+
+// Update takes the representation of a clusterGroup and updates it. Returns the server's representation of the clusterGroup, and an error, if there is any.
+func (c *FakeClusterGroups) Update(ctx context.Context, clusterGroup *v1alpha1.ClusterGroup, opts v1.UpdateOptions) (result *v1alpha1.ClusterGroup, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(clustergroupsResource, c.ns, clusterGroup), &v1alpha1.ClusterGroup{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterGroup), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeClusterGroups) UpdateStatus(ctx context.Context, clusterGroup *v1alpha1.ClusterGroup, opts v1.UpdateOptions) (*v1alpha1.ClusterGroup, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(clustergroupsResource, "status", c.ns, clusterGroup), &v1alpha1.ClusterGroup{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterGroup), err
+}
+
+// Delete takes name of the clusterGroup and deletes it. Returns an error if one occurs.
+func (c *FakeClusterGroups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(clustergroupsResource, c.ns, name), &v1alpha1.ClusterGroup{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeClusterGroups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(clustergroupsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.ClusterGroupList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched clusterGroup.
+func (c *FakeClusterGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterGroup, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(clustergroupsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ClusterGroup{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterGroup), err
+}
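
UpdateStatus above records an update action against the "status" subresource, which tests can assert on via the shared action log; the tracker itself simply stores whatever object is passed in. A hedged sketch, again assuming the generated NewSimpleClientset constructor:

package fake_test

import (
	"context"
	"testing"

	fleetv1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	fleetfake "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/fake"
)

func TestClusterGroupUpdateStatusIsASubresourceAction(t *testing.T) {
	cg := &fleetv1alpha1.ClusterGroup{
		ObjectMeta: metav1.ObjectMeta{Name: "prod", Namespace: "fleet-default"},
	}
	cs := fleetfake.NewSimpleClientset(cg)

	if _, err := cs.FleetV1alpha1().ClusterGroups("fleet-default").
		UpdateStatus(context.TODO(), cg, metav1.UpdateOptions{}); err != nil {
		t.Fatal(err)
	}

	// The last recorded action carries the "status" subresource.
	actions := cs.Actions()
	if last := actions[len(actions)-1]; last.GetSubresource() != "status" {
		t.Fatalf("expected a status subresource update, got %#v", last)
	}
}
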
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_clusterregistration.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_clusterregistration.go
new file mode 100644
index 0000000..1226386
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_clusterregistration.go
@@ -0,0 +1,142 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeClusterRegistrations implements ClusterRegistrationInterface
+type FakeClusterRegistrations struct {
+ Fake *FakeFleetV1alpha1
+ ns string
+}
+
+var clusterregistrationsResource = schema.GroupVersionResource{Group: "fleet.cattle.io", Version: "v1alpha1", Resource: "clusterregistrations"}
+
+var clusterregistrationsKind = schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "ClusterRegistration"}
+
+// Get takes name of the clusterRegistration, and returns the corresponding clusterRegistration object, and an error if there is any.
+func (c *FakeClusterRegistrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRegistration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(clusterregistrationsResource, c.ns, name), &v1alpha1.ClusterRegistration{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRegistration), err
+}
+
+// List takes label and field selectors, and returns the list of ClusterRegistrations that match those selectors.
+func (c *FakeClusterRegistrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRegistrationList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(clusterregistrationsResource, clusterregistrationsKind, c.ns, opts), &v1alpha1.ClusterRegistrationList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.ClusterRegistrationList{ListMeta: obj.(*v1alpha1.ClusterRegistrationList).ListMeta}
+ for _, item := range obj.(*v1alpha1.ClusterRegistrationList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRegistrations.
+func (c *FakeClusterRegistrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(clusterregistrationsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a clusterRegistration and creates it. Returns the server's representation of the clusterRegistration, and an error, if there is any.
+func (c *FakeClusterRegistrations) Create(ctx context.Context, clusterRegistration *v1alpha1.ClusterRegistration, opts v1.CreateOptions) (result *v1alpha1.ClusterRegistration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(clusterregistrationsResource, c.ns, clusterRegistration), &v1alpha1.ClusterRegistration{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRegistration), err
+}
+
+// Update takes the representation of a clusterRegistration and updates it. Returns the server's representation of the clusterRegistration, and an error, if there is any.
+func (c *FakeClusterRegistrations) Update(ctx context.Context, clusterRegistration *v1alpha1.ClusterRegistration, opts v1.UpdateOptions) (result *v1alpha1.ClusterRegistration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(clusterregistrationsResource, c.ns, clusterRegistration), &v1alpha1.ClusterRegistration{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRegistration), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeClusterRegistrations) UpdateStatus(ctx context.Context, clusterRegistration *v1alpha1.ClusterRegistration, opts v1.UpdateOptions) (*v1alpha1.ClusterRegistration, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(clusterregistrationsResource, "status", c.ns, clusterRegistration), &v1alpha1.ClusterRegistration{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRegistration), err
+}
+
+// Delete takes name of the clusterRegistration and deletes it. Returns an error if one occurs.
+func (c *FakeClusterRegistrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(clusterregistrationsResource, c.ns, name), &v1alpha1.ClusterRegistration{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeClusterRegistrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(clusterregistrationsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRegistrationList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched clusterRegistration.
+func (c *FakeClusterRegistrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRegistration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(clusterregistrationsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ClusterRegistration{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRegistration), err
+}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_clusterregistrationtoken.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_clusterregistrationtoken.go
new file mode 100644
index 0000000..98bfcaa
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_clusterregistrationtoken.go
@@ -0,0 +1,142 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeClusterRegistrationTokens implements ClusterRegistrationTokenInterface
+type FakeClusterRegistrationTokens struct {
+ Fake *FakeFleetV1alpha1
+ ns string
+}
+
+var clusterregistrationtokensResource = schema.GroupVersionResource{Group: "fleet.cattle.io", Version: "v1alpha1", Resource: "clusterregistrationtokens"}
+
+var clusterregistrationtokensKind = schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "ClusterRegistrationToken"}
+
+// Get takes name of the clusterRegistrationToken, and returns the corresponding clusterRegistrationToken object, and an error if there is any.
+func (c *FakeClusterRegistrationTokens) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRegistrationToken, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(clusterregistrationtokensResource, c.ns, name), &v1alpha1.ClusterRegistrationToken{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRegistrationToken), err
+}
+
+// List takes label and field selectors, and returns the list of ClusterRegistrationTokens that match those selectors.
+func (c *FakeClusterRegistrationTokens) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRegistrationTokenList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(clusterregistrationtokensResource, clusterregistrationtokensKind, c.ns, opts), &v1alpha1.ClusterRegistrationTokenList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.ClusterRegistrationTokenList{ListMeta: obj.(*v1alpha1.ClusterRegistrationTokenList).ListMeta}
+ for _, item := range obj.(*v1alpha1.ClusterRegistrationTokenList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRegistrationTokens.
+func (c *FakeClusterRegistrationTokens) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(clusterregistrationtokensResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a clusterRegistrationToken and creates it. Returns the server's representation of the clusterRegistrationToken, and an error, if there is any.
+func (c *FakeClusterRegistrationTokens) Create(ctx context.Context, clusterRegistrationToken *v1alpha1.ClusterRegistrationToken, opts v1.CreateOptions) (result *v1alpha1.ClusterRegistrationToken, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(clusterregistrationtokensResource, c.ns, clusterRegistrationToken), &v1alpha1.ClusterRegistrationToken{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRegistrationToken), err
+}
+
+// Update takes the representation of a clusterRegistrationToken and updates it. Returns the server's representation of the clusterRegistrationToken, and an error, if there is any.
+func (c *FakeClusterRegistrationTokens) Update(ctx context.Context, clusterRegistrationToken *v1alpha1.ClusterRegistrationToken, opts v1.UpdateOptions) (result *v1alpha1.ClusterRegistrationToken, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(clusterregistrationtokensResource, c.ns, clusterRegistrationToken), &v1alpha1.ClusterRegistrationToken{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRegistrationToken), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeClusterRegistrationTokens) UpdateStatus(ctx context.Context, clusterRegistrationToken *v1alpha1.ClusterRegistrationToken, opts v1.UpdateOptions) (*v1alpha1.ClusterRegistrationToken, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(clusterregistrationtokensResource, "status", c.ns, clusterRegistrationToken), &v1alpha1.ClusterRegistrationToken{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRegistrationToken), err
+}
+
+// Delete takes name of the clusterRegistrationToken and deletes it. Returns an error if one occurs.
+func (c *FakeClusterRegistrationTokens) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(clusterregistrationtokensResource, c.ns, name), &v1alpha1.ClusterRegistrationToken{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeClusterRegistrationTokens) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(clusterregistrationtokensResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRegistrationTokenList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched clusterRegistrationToken.
+func (c *FakeClusterRegistrationTokens) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRegistrationToken, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(clusterregistrationtokensResource, c.ns, name, pt, data, subresources...), &v1alpha1.ClusterRegistrationToken{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRegistrationToken), err
+}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_content.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_content.go
new file mode 100644
index 0000000..a925f41
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_content.go
@@ -0,0 +1,122 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeContents implements ContentInterface
+type FakeContents struct {
+ Fake *FakeFleetV1alpha1
+}
+
+var contentsResource = schema.GroupVersionResource{Group: "fleet.cattle.io", Version: "v1alpha1", Resource: "contents"}
+
+var contentsKind = schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "Content"}
+
+// Get takes name of the content, and returns the corresponding content object, and an error if there is any.
+func (c *FakeContents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Content, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(contentsResource, name), &v1alpha1.Content{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Content), err
+}
+
+// List takes label and field selectors, and returns the list of Contents that match those selectors.
+func (c *FakeContents) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ContentList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(contentsResource, contentsKind, opts), &v1alpha1.ContentList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.ContentList{ListMeta: obj.(*v1alpha1.ContentList).ListMeta}
+ for _, item := range obj.(*v1alpha1.ContentList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested contents.
+func (c *FakeContents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(contentsResource, opts))
+}
+
+// Create takes the representation of a content and creates it. Returns the server's representation of the content, and an error, if there is any.
+func (c *FakeContents) Create(ctx context.Context, content *v1alpha1.Content, opts v1.CreateOptions) (result *v1alpha1.Content, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(contentsResource, content), &v1alpha1.Content{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Content), err
+}
+
+// Update takes the representation of a content and updates it. Returns the server's representation of the content, and an error, if there is any.
+func (c *FakeContents) Update(ctx context.Context, content *v1alpha1.Content, opts v1.UpdateOptions) (result *v1alpha1.Content, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(contentsResource, content), &v1alpha1.Content{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Content), err
+}
+
+// Delete takes name of the content and deletes it. Returns an error if one occurs.
+func (c *FakeContents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(contentsResource, name), &v1alpha1.Content{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeContents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(contentsResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.ContentList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched content.
+func (c *FakeContents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Content, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(contentsResource, name, pt, data, subresources...), &v1alpha1.Content{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Content), err
+}
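
Content is the one cluster-scoped resource in this package: FakeContents keeps no namespace field and records Root* actions, so Contents() is called without a namespace. A brief sketch under the same assumptions as the earlier examples:

package fake_test

import (
	"context"
	"testing"

	fleetv1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	fleetfake "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/fake"
)

func TestContentIsClusterScoped(t *testing.T) {
	cs := fleetfake.NewSimpleClientset()

	// No namespace argument: Contents() returns a cluster-scoped interface.
	created, err := cs.FleetV1alpha1().Contents().Create(context.TODO(),
		&fleetv1alpha1.Content{ObjectMeta: metav1.ObjectMeta{Name: "sha256-abc"}},
		metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if created.Namespace != "" {
		t.Fatalf("expected no namespace on a cluster-scoped object, got %q", created.Namespace)
	}
}
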
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_fleet.cattle.io_client.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_fleet.cattle.io_client.go
new file mode 100644
index 0000000..366d2fd
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_fleet.cattle.io_client.go
@@ -0,0 +1,76 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeFleetV1alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeFleetV1alpha1) Bundles(namespace string) v1alpha1.BundleInterface {
+ return &FakeBundles{c, namespace}
+}
+
+func (c *FakeFleetV1alpha1) BundleDeployments(namespace string) v1alpha1.BundleDeploymentInterface {
+ return &FakeBundleDeployments{c, namespace}
+}
+
+func (c *FakeFleetV1alpha1) BundleNamespaceMappings(namespace string) v1alpha1.BundleNamespaceMappingInterface {
+ return &FakeBundleNamespaceMappings{c, namespace}
+}
+
+func (c *FakeFleetV1alpha1) Clusters(namespace string) v1alpha1.ClusterInterface {
+ return &FakeClusters{c, namespace}
+}
+
+func (c *FakeFleetV1alpha1) ClusterGroups(namespace string) v1alpha1.ClusterGroupInterface {
+ return &FakeClusterGroups{c, namespace}
+}
+
+func (c *FakeFleetV1alpha1) ClusterRegistrations(namespace string) v1alpha1.ClusterRegistrationInterface {
+ return &FakeClusterRegistrations{c, namespace}
+}
+
+func (c *FakeFleetV1alpha1) ClusterRegistrationTokens(namespace string) v1alpha1.ClusterRegistrationTokenInterface {
+ return &FakeClusterRegistrationTokens{c, namespace}
+}
+
+func (c *FakeFleetV1alpha1) Contents() v1alpha1.ContentInterface {
+ return &FakeContents{c}
+}
+
+func (c *FakeFleetV1alpha1) GitRepos(namespace string) v1alpha1.GitRepoInterface {
+ return &FakeGitRepos{c, namespace}
+}
+
+func (c *FakeFleetV1alpha1) GitRepoRestrictions(namespace string) v1alpha1.GitRepoRestrictionInterface {
+ return &FakeGitRepoRestrictions{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeFleetV1alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
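
FakeFleetV1alpha1 hands its one embedded *testing.Fake to every typed fake above, so all calls made through the clientset land in a single ordered action log (and RESTClient() is only a stub returning a nil *rest.RESTClient). A sketch of asserting on that shared log, with the same NewSimpleClientset assumption:

package fake_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	fleetfake "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/fake"
)

func TestActionsAreRecordedOnOneFake(t *testing.T) {
	cs := fleetfake.NewSimpleClientset()
	fleet := cs.FleetV1alpha1()

	_, _ = fleet.Bundles("fleet-default").List(context.TODO(), metav1.ListOptions{})
	_, _ = fleet.GitRepos("fleet-default").List(context.TODO(), metav1.ListOptions{})

	// Both list calls were recorded, in order, on the same testing.Fake.
	actions := cs.Actions()
	if len(actions) != 2 ||
		!actions[0].Matches("list", "bundles") ||
		!actions[1].Matches("list", "gitrepos") {
		t.Fatalf("unexpected action log: %v", actions)
	}
}
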
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_gitrepo.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_gitrepo.go
new file mode 100644
index 0000000..b6adcac
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_gitrepo.go
@@ -0,0 +1,142 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeGitRepos implements GitRepoInterface
+type FakeGitRepos struct {
+ Fake *FakeFleetV1alpha1
+ ns string
+}
+
+var gitreposResource = schema.GroupVersionResource{Group: "fleet.cattle.io", Version: "v1alpha1", Resource: "gitrepos"}
+
+var gitreposKind = schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "GitRepo"}
+
+// Get takes name of the gitRepo, and returns the corresponding gitRepo object, and an error if there is any.
+func (c *FakeGitRepos) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.GitRepo, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(gitreposResource, c.ns, name), &v1alpha1.GitRepo{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GitRepo), err
+}
+
+// List takes label and field selectors, and returns the list of GitRepos that match those selectors.
+func (c *FakeGitRepos) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.GitRepoList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(gitreposResource, gitreposKind, c.ns, opts), &v1alpha1.GitRepoList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.GitRepoList{ListMeta: obj.(*v1alpha1.GitRepoList).ListMeta}
+ for _, item := range obj.(*v1alpha1.GitRepoList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested gitRepos.
+func (c *FakeGitRepos) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(gitreposResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a gitRepo and creates it. Returns the server's representation of the gitRepo, and an error, if there is any.
+func (c *FakeGitRepos) Create(ctx context.Context, gitRepo *v1alpha1.GitRepo, opts v1.CreateOptions) (result *v1alpha1.GitRepo, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(gitreposResource, c.ns, gitRepo), &v1alpha1.GitRepo{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GitRepo), err
+}
+
+// Update takes the representation of a gitRepo and updates it. Returns the server's representation of the gitRepo, and an error, if there is any.
+func (c *FakeGitRepos) Update(ctx context.Context, gitRepo *v1alpha1.GitRepo, opts v1.UpdateOptions) (result *v1alpha1.GitRepo, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(gitreposResource, c.ns, gitRepo), &v1alpha1.GitRepo{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GitRepo), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeGitRepos) UpdateStatus(ctx context.Context, gitRepo *v1alpha1.GitRepo, opts v1.UpdateOptions) (*v1alpha1.GitRepo, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(gitreposResource, "status", c.ns, gitRepo), &v1alpha1.GitRepo{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GitRepo), err
+}
+
+// Delete takes name of the gitRepo and deletes it. Returns an error if one occurs.
+func (c *FakeGitRepos) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(gitreposResource, c.ns, name), &v1alpha1.GitRepo{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeGitRepos) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(gitreposResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.GitRepoList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched gitRepo.
+func (c *FakeGitRepos) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.GitRepo, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(gitreposResource, c.ns, name, pt, data, subresources...), &v1alpha1.GitRepo{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GitRepo), err
+}
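
Watch above delegates to InvokesWatch; with the default watch reactor that the generated NewSimpleClientset wires to the object tracker, later writes through the same clientset show up as events on the returned channel. A hedged sketch of that interplay:

package fake_test

import (
	"context"
	"testing"

	fleetv1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"

	fleetfake "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/fake"
)

func TestGitRepoWatchSeesCreate(t *testing.T) {
	cs := fleetfake.NewSimpleClientset()

	w, err := cs.FleetV1alpha1().GitRepos("fleet-default").
		Watch(context.TODO(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	defer w.Stop()

	// Creating through the same fake clientset pushes an Added event to the
	// watcher the tracker handed out above.
	_, _ = cs.FleetV1alpha1().GitRepos("fleet-default").Create(context.TODO(),
		&fleetv1alpha1.GitRepo{ObjectMeta: metav1.ObjectMeta{Name: "config", Namespace: "fleet-default"}},
		metav1.CreateOptions{})

	if ev := <-w.ResultChan(); ev.Type != watch.Added {
		t.Fatalf("expected Added event, got %v", ev.Type)
	}
}
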
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_gitreporestriction.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_gitreporestriction.go
new file mode 100644
index 0000000..4115186
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fake/fake_gitreporestriction.go
@@ -0,0 +1,130 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeGitRepoRestrictions implements GitRepoRestrictionInterface
+type FakeGitRepoRestrictions struct {
+ Fake *FakeFleetV1alpha1
+ ns string
+}
+
+var gitreporestrictionsResource = schema.GroupVersionResource{Group: "fleet.cattle.io", Version: "v1alpha1", Resource: "gitreporestrictions"}
+
+var gitreporestrictionsKind = schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "GitRepoRestriction"}
+
+// Get takes name of the gitRepoRestriction, and returns the corresponding gitRepoRestriction object, and an error if there is any.
+func (c *FakeGitRepoRestrictions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.GitRepoRestriction, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(gitreporestrictionsResource, c.ns, name), &v1alpha1.GitRepoRestriction{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GitRepoRestriction), err
+}
+
+// List takes label and field selectors, and returns the list of GitRepoRestrictions that match those selectors.
+func (c *FakeGitRepoRestrictions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.GitRepoRestrictionList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(gitreporestrictionsResource, gitreporestrictionsKind, c.ns, opts), &v1alpha1.GitRepoRestrictionList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.GitRepoRestrictionList{ListMeta: obj.(*v1alpha1.GitRepoRestrictionList).ListMeta}
+ for _, item := range obj.(*v1alpha1.GitRepoRestrictionList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested gitRepoRestrictions.
+func (c *FakeGitRepoRestrictions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(gitreporestrictionsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a gitRepoRestriction and creates it. Returns the server's representation of the gitRepoRestriction, and an error, if there is any.
+func (c *FakeGitRepoRestrictions) Create(ctx context.Context, gitRepoRestriction *v1alpha1.GitRepoRestriction, opts v1.CreateOptions) (result *v1alpha1.GitRepoRestriction, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(gitreporestrictionsResource, c.ns, gitRepoRestriction), &v1alpha1.GitRepoRestriction{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GitRepoRestriction), err
+}
+
+// Update takes the representation of a gitRepoRestriction and updates it. Returns the server's representation of the gitRepoRestriction, and an error, if there is any.
+func (c *FakeGitRepoRestrictions) Update(ctx context.Context, gitRepoRestriction *v1alpha1.GitRepoRestriction, opts v1.UpdateOptions) (result *v1alpha1.GitRepoRestriction, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(gitreporestrictionsResource, c.ns, gitRepoRestriction), &v1alpha1.GitRepoRestriction{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GitRepoRestriction), err
+}
+
+// Delete takes name of the gitRepoRestriction and deletes it. Returns an error if one occurs.
+func (c *FakeGitRepoRestrictions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(gitreporestrictionsResource, c.ns, name), &v1alpha1.GitRepoRestriction{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeGitRepoRestrictions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(gitreporestrictionsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.GitRepoRestrictionList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched gitRepoRestriction.
+func (c *FakeGitRepoRestrictions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.GitRepoRestriction, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(gitreporestrictionsResource, c.ns, name, pt, data, subresources...), &v1alpha1.GitRepoRestriction{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GitRepoRestriction), err
+}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fleet.cattle.io_client.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fleet.cattle.io_client.go
new file mode 100644
index 0000000..5d6e889
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/fleet.cattle.io_client.go
@@ -0,0 +1,134 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/scheme"
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ rest "k8s.io/client-go/rest"
+)
+
+type FleetV1alpha1Interface interface {
+ RESTClient() rest.Interface
+ BundlesGetter
+ BundleDeploymentsGetter
+ BundleNamespaceMappingsGetter
+ ClustersGetter
+ ClusterGroupsGetter
+ ClusterRegistrationsGetter
+ ClusterRegistrationTokensGetter
+ ContentsGetter
+ GitReposGetter
+ GitRepoRestrictionsGetter
+}
+
+// FleetV1alpha1Client is used to interact with features provided by the fleet.cattle.io group.
+type FleetV1alpha1Client struct {
+ restClient rest.Interface
+}
+
+func (c *FleetV1alpha1Client) Bundles(namespace string) BundleInterface {
+ return newBundles(c, namespace)
+}
+
+func (c *FleetV1alpha1Client) BundleDeployments(namespace string) BundleDeploymentInterface {
+ return newBundleDeployments(c, namespace)
+}
+
+func (c *FleetV1alpha1Client) BundleNamespaceMappings(namespace string) BundleNamespaceMappingInterface {
+ return newBundleNamespaceMappings(c, namespace)
+}
+
+func (c *FleetV1alpha1Client) Clusters(namespace string) ClusterInterface {
+ return newClusters(c, namespace)
+}
+
+func (c *FleetV1alpha1Client) ClusterGroups(namespace string) ClusterGroupInterface {
+ return newClusterGroups(c, namespace)
+}
+
+func (c *FleetV1alpha1Client) ClusterRegistrations(namespace string) ClusterRegistrationInterface {
+ return newClusterRegistrations(c, namespace)
+}
+
+func (c *FleetV1alpha1Client) ClusterRegistrationTokens(namespace string) ClusterRegistrationTokenInterface {
+ return newClusterRegistrationTokens(c, namespace)
+}
+
+func (c *FleetV1alpha1Client) Contents() ContentInterface {
+ return newContents(c)
+}
+
+func (c *FleetV1alpha1Client) GitRepos(namespace string) GitRepoInterface {
+ return newGitRepos(c, namespace)
+}
+
+func (c *FleetV1alpha1Client) GitRepoRestrictions(namespace string) GitRepoRestrictionInterface {
+ return newGitRepoRestrictions(c, namespace)
+}
+
+// NewForConfig creates a new FleetV1alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*FleetV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &FleetV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new FleetV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *FleetV1alpha1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new FleetV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *FleetV1alpha1Client {
+ return &FleetV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1alpha1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FleetV1alpha1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/generated_expansion.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/generated_expansion.go
new file mode 100644
index 0000000..568e371
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/generated_expansion.go
@@ -0,0 +1,39 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type BundleExpansion interface{}
+
+type BundleDeploymentExpansion interface{}
+
+type BundleNamespaceMappingExpansion interface{}
+
+type ClusterExpansion interface{}
+
+type ClusterGroupExpansion interface{}
+
+type ClusterRegistrationExpansion interface{}
+
+type ClusterRegistrationTokenExpansion interface{}
+
+type ContentExpansion interface{}
+
+type GitRepoExpansion interface{}
+
+type GitRepoRestrictionExpansion interface{}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/gitrepo.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/gitrepo.go
new file mode 100644
index 0000000..da10af1
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/gitrepo.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ scheme "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/scheme"
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// GitReposGetter has a method to return a GitRepoInterface.
+// A group's client should implement this interface.
+type GitReposGetter interface {
+ GitRepos(namespace string) GitRepoInterface
+}
+
+// GitRepoInterface has methods to work with GitRepo resources.
+type GitRepoInterface interface {
+ Create(ctx context.Context, gitRepo *v1alpha1.GitRepo, opts v1.CreateOptions) (*v1alpha1.GitRepo, error)
+ Update(ctx context.Context, gitRepo *v1alpha1.GitRepo, opts v1.UpdateOptions) (*v1alpha1.GitRepo, error)
+ UpdateStatus(ctx context.Context, gitRepo *v1alpha1.GitRepo, opts v1.UpdateOptions) (*v1alpha1.GitRepo, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.GitRepo, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.GitRepoList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.GitRepo, err error)
+ GitRepoExpansion
+}
+
+// gitRepos implements GitRepoInterface
+type gitRepos struct {
+ client rest.Interface
+ ns string
+}
+
+// newGitRepos returns a GitRepos
+func newGitRepos(c *FleetV1alpha1Client, namespace string) *gitRepos {
+ return &gitRepos{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the gitRepo, and returns the corresponding gitRepo object, and an error if there is any.
+func (c *gitRepos) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.GitRepo, err error) {
+ result = &v1alpha1.GitRepo{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("gitrepos").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of GitRepos that match those selectors.
+func (c *gitRepos) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.GitRepoList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.GitRepoList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("gitrepos").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested gitRepos.
+func (c *gitRepos) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("gitrepos").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a gitRepo and creates it. Returns the server's representation of the gitRepo, and an error, if there is any.
+func (c *gitRepos) Create(ctx context.Context, gitRepo *v1alpha1.GitRepo, opts v1.CreateOptions) (result *v1alpha1.GitRepo, err error) {
+ result = &v1alpha1.GitRepo{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("gitrepos").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(gitRepo).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a gitRepo and updates it. Returns the server's representation of the gitRepo, and an error, if there is any.
+func (c *gitRepos) Update(ctx context.Context, gitRepo *v1alpha1.GitRepo, opts v1.UpdateOptions) (result *v1alpha1.GitRepo, err error) {
+ result = &v1alpha1.GitRepo{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("gitrepos").
+ Name(gitRepo.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(gitRepo).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *gitRepos) UpdateStatus(ctx context.Context, gitRepo *v1alpha1.GitRepo, opts v1.UpdateOptions) (result *v1alpha1.GitRepo, err error) {
+ result = &v1alpha1.GitRepo{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("gitrepos").
+ Name(gitRepo.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(gitRepo).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the gitRepo and deletes it. Returns an error if one occurs.
+func (c *gitRepos) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("gitrepos").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *gitRepos) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("gitrepos").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched gitRepo.
+func (c *gitRepos) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.GitRepo, err error) {
+ result = &v1alpha1.GitRepo{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("gitrepos").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/gitreporestriction.go b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/gitreporestriction.go
new file mode 100644
index 0000000..64221e3
--- /dev/null
+++ b/pkg/client/fleet/clientset/versioned/typed/fleet.cattle.io/v1alpha1/gitreporestriction.go
@@ -0,0 +1,178 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ scheme "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned/scheme"
+ v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// GitRepoRestrictionsGetter has a method to return a GitRepoRestrictionInterface.
+// A group's client should implement this interface.
+type GitRepoRestrictionsGetter interface {
+ GitRepoRestrictions(namespace string) GitRepoRestrictionInterface
+}
+
+// GitRepoRestrictionInterface has methods to work with GitRepoRestriction resources.
+type GitRepoRestrictionInterface interface {
+ Create(ctx context.Context, gitRepoRestriction *v1alpha1.GitRepoRestriction, opts v1.CreateOptions) (*v1alpha1.GitRepoRestriction, error)
+ Update(ctx context.Context, gitRepoRestriction *v1alpha1.GitRepoRestriction, opts v1.UpdateOptions) (*v1alpha1.GitRepoRestriction, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.GitRepoRestriction, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.GitRepoRestrictionList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.GitRepoRestriction, err error)
+ GitRepoRestrictionExpansion
+}
+
+// gitRepoRestrictions implements GitRepoRestrictionInterface
+type gitRepoRestrictions struct {
+ client rest.Interface
+ ns string
+}
+
+// newGitRepoRestrictions returns a GitRepoRestrictions
+func newGitRepoRestrictions(c *FleetV1alpha1Client, namespace string) *gitRepoRestrictions {
+ return &gitRepoRestrictions{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the gitRepoRestriction, and returns the corresponding gitRepoRestriction object, and an error if there is any.
+func (c *gitRepoRestrictions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.GitRepoRestriction, err error) {
+ result = &v1alpha1.GitRepoRestriction{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("gitreporestrictions").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of GitRepoRestrictions that match those selectors.
+func (c *gitRepoRestrictions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.GitRepoRestrictionList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.GitRepoRestrictionList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("gitreporestrictions").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested gitRepoRestrictions.
+func (c *gitRepoRestrictions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("gitreporestrictions").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a gitRepoRestriction and creates it. Returns the server's representation of the gitRepoRestriction, and an error, if there is any.
+func (c *gitRepoRestrictions) Create(ctx context.Context, gitRepoRestriction *v1alpha1.GitRepoRestriction, opts v1.CreateOptions) (result *v1alpha1.GitRepoRestriction, err error) {
+ result = &v1alpha1.GitRepoRestriction{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("gitreporestrictions").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(gitRepoRestriction).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a gitRepoRestriction and updates it. Returns the server's representation of the gitRepoRestriction, and an error, if there is any.
+func (c *gitRepoRestrictions) Update(ctx context.Context, gitRepoRestriction *v1alpha1.GitRepoRestriction, opts v1.UpdateOptions) (result *v1alpha1.GitRepoRestriction, err error) {
+ result = &v1alpha1.GitRepoRestriction{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("gitreporestrictions").
+ Name(gitRepoRestriction.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(gitRepoRestriction).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the gitRepoRestriction and deletes it. Returns an error if one occurs.
+func (c *gitRepoRestrictions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("gitreporestrictions").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *gitRepoRestrictions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("gitreporestrictions").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched gitRepoRestriction.
+func (c *gitRepoRestrictions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.GitRepoRestriction, err error) {
+ result = &v1alpha1.GitRepoRestriction{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("gitreporestrictions").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/pkg/cmd/options.go b/pkg/cmd/options.go
new file mode 100644
index 0000000..9216675
--- /dev/null
+++ b/pkg/cmd/options.go
@@ -0,0 +1,110 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+	"errors"
+	"io/ioutil"
+
+	"github.com/gardener/gardener/extensions/pkg/controller/cmd"
+	extensionshealthcheckcontroller "github.com/gardener/gardener/extensions/pkg/controller/healthcheck"
+	healthcheckconfig "github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config"
+	"github.com/spf13/pflag"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+
+	apisconfig "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config"
+	"github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config/v1alpha1"
+	"github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller"
+	controllerconfig "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller/config"
+	healthcheckcontroller "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller/healthcheck"
+)
+
+var (
+ scheme *runtime.Scheme
+ decoder runtime.Decoder
+)
+
+func init() {
+ scheme = runtime.NewScheme()
+ utilruntime.Must(apisconfig.AddToScheme(scheme))
+ utilruntime.Must(v1alpha1.AddToScheme(scheme))
+
+ decoder = serializer.NewCodecFactory(scheme).UniversalDecoder()
+}
+
+// FleetServiceOptions holds options related to the fleet agent service.
+type FleetServiceOptions struct {
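+	// ConfigLocation is the path to the fleet agent service configuration file.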
+ ConfigLocation string
+ config *FleetServiceConfig
+}
+
+// AddFlags implements Flagger.AddFlags.
+func (o *FleetServiceOptions) AddFlags(fs *pflag.FlagSet) {
+ fs.StringVar(&o.ConfigLocation, "config", "", "Path to fleet agent service configuration")
+}
+
+// Complete implements Completer.Complete.
+func (o *FleetServiceOptions) Complete() error {
+ if o.ConfigLocation == "" {
+ return errors.New("config location is not set")
+ }
+
+ data, err := ioutil.ReadFile(o.ConfigLocation)
+ if err != nil {
+ return err
+ }
+
+ config := apisconfig.FleetAgentConfig{}
+ _, _, err = decoder.Decode(data, nil, &config)
+ if err != nil {
+ return err
+ }
+
+ o.config = &FleetServiceConfig{
+ config: config,
+ }
+
+ return nil
+}
+
+// Completed returns the decoded FleetServiceConfiguration instance. Only call this if `Complete` was successful.
+func (o *FleetServiceOptions) Completed() *FleetServiceConfig {
+ return o.config
+}
+
+// FleetServiceConfig contains configuration information about the fleet service.
+type FleetServiceConfig struct {
+ config apisconfig.FleetAgentConfig
+}
+
+// Apply applies the FleetServiceOptions to the passed ControllerOptions instance.
+func (c *FleetServiceConfig) Apply(config *controllerconfig.Config) {
+ config.FleetAgentConfig = c.config
+}
+
+// ControllerSwitches are the cmd.SwitchOptions for the provider controllers.
+func ControllerSwitches() *cmd.SwitchOptions {
+ return cmd.NewSwitchOptions(
+ cmd.Switch(controller.ControllerName, controller.AddToManager),
+ cmd.Switch(extensionshealthcheckcontroller.ControllerName, healthcheckcontroller.AddToManager),
+ )
+}
+
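+// ApplyHealthCheckConfig overrides the given health check configuration with the one from the fleet service configuration, if set.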
+func (c *FleetServiceConfig) ApplyHealthCheckConfig(config *healthcheckconfig.HealthCheckConfig) {
+ if c.config.HealthCheckConfig != nil {
+ *config = *c.config.HealthCheckConfig
+ }
+}
\ No newline at end of file
diff --git a/pkg/controller/actuator.go b/pkg/controller/actuator.go
new file mode 100644
index 0000000..e9b2ad7
--- /dev/null
+++ b/pkg/controller/actuator.go
@@ -0,0 +1,204 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+ "context"
+ b64 "encoding/base64"
+ "fmt"
+ "github.com/gardener/gardener/pkg/extensions"
+ "github.com/go-logr/logr"
+ fleetv1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+ "k8s.io/client-go/util/retry"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+
+ "github.com/gardener/gardener/extensions/pkg/controller"
+ "github.com/gardener/gardener/extensions/pkg/controller/extension"
+ "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller/config"
+
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+ kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
+)
+
+// ActuatorName is the name of the Fleet agent actuator.
+const ActuatorName = "shoot-fleet-agent-actuator"
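+// KubeconfigSecretName is the name of the secret in the shoot namespace that contains the shoot kubeconfig.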
+const KubeconfigSecretName = "kubecfg"
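+// KubeconfigKey is the data key under which the kubeconfig is stored in that secret.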
+const KubeconfigKey = "kubeconfig"
+
+// NewActuator returns an actuator responsible for Extension resources.
+func NewActuator(config config.Config) extension.Actuator {
+	fleetKubeConfig, err := b64.StdEncoding.DecodeString(config.FleetAgentConfig.ClientConnection.Kubeconfig)
+	if err != nil {
+		panic(err)
+	}
+	kubeconfigPath, err := writeKubeconfigToTempFile(fleetKubeConfig)
+	if err != nil {
+		panic(err)
+	}
+	fleetClientConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
+	if err != nil {
+		panic(err)
+	}
+	fleetManager, err := NewManagerForConfig(fleetClientConfig, "clusters") // TODO: read the registration namespace from the configuration
+	if err != nil {
+		panic(err)
+	}
+
+ return &actuator{
+ logger: log.Log.WithName(ActuatorName),
+ serviceConfig: config,
+ fleetManager: fleetManager,
+ }
+}
+
+type actuator struct {
+ client client.Client
+ config *rest.Config
+ decoder runtime.Decoder
+ fleetManager *FleetManager
+
+ serviceConfig config.Config
+
+ logger logr.Logger
+}
+
+// Reconcile the Extension resource.
+func (a *actuator) Reconcile(ctx context.Context, ex *extensionsv1alpha1.Extension) error {
+ namespace := ex.GetNamespace()
+ a.logger.Info("Component is being reconciled", "component", "fleet-agent-management", "namespace", namespace)
+ cluster, err := controller.GetCluster(ctx, a.client, namespace)
+ if err != nil {
+ return err
+ }
+
+ cfg := &config.Config{}
+	if ex.Spec.ProviderConfig != nil { // decode the optional per-shoot provider configuration
+		if _, _, err := a.decoder.Decode(ex.Spec.ProviderConfig.Raw, nil, cfg); err != nil {
+			return fmt.Errorf("failed to decode provider config: %w", err)
+		}
+ }
+ }
+
+ a.registerClusterInFleetManager(ctx, namespace, cluster)
+ return a.updateStatus(ctx, ex)
+}
+
+// Delete the Extension resource.
+func (a *actuator) Delete(ctx context.Context, ex *extensionsv1alpha1.Extension) error {
+ namespace := ex.GetNamespace()
+ a.logger.Info("Component is being deleted", "component", "fleet-agent-management", "namespace", namespace)
+ return nil
+}
+
+// Restore the Extension resource.
+func (a *actuator) Restore(ctx context.Context, ex *extensionsv1alpha1.Extension) error {
+ a.logger.Info("Component is being restored", "component", "fleet-agent-management")
+ return a.Reconcile(ctx, ex)
+}
+
+// Migrate the Extension resource.
+func (a *actuator) Migrate(ctx context.Context, ex *extensionsv1alpha1.Extension) error {
+ a.logger.Info("Component is being migrated", "component", "fleet-agent-management")
+
+ return a.Delete(ctx, ex)
+}
+
+// InjectConfig injects the rest config to this actuator.
+func (a *actuator) InjectConfig(config *rest.Config) error {
+ a.config = config
+ return nil
+}
+
+// InjectClient injects the controller runtime client into the reconciler.
+func (a *actuator) InjectClient(client client.Client) error {
+ a.client = client
+ return nil
+}
+
+// InjectScheme injects the given scheme into the reconciler.
+func (a *actuator) InjectScheme(scheme *runtime.Scheme) error {
+ a.decoder = serializer.NewCodecFactory(scheme).UniversalDecoder()
+ return nil
+}
+
+func (a *actuator) registerClusterInFleetManager(ctx context.Context, namespace string, cluster *extensions.Cluster) {
+	a.logger.Info("Checking whether the cluster is already registered")
+	registered, err := a.fleetManager.GetCluster(ctx, cluster.Shoot.Name)
+	if err == nil {
+		a.logger.Info("Cluster already registered - skipping registration", "clientId", registered.Spec.ClientID)
+		return
+	}
+	if !errors.IsNotFound(err) {
+		a.logger.Error(err, "Failed to look up existing cluster registration")
+		return
+	}
+	a.logger.Info("Cluster registration not found.")
+ a.logger.Info("Starting cluster registration process")
+ secret := &corev1.Secret{}
+
+ labels := make(map[string]string)
+ labels["corebundle"] = "true"
+ labels["region"] = cluster.Shoot.Spec.Region
+ labels["cluster"] = cluster.Shoot.Name
+	// add labels from the extension configuration (ranging over a nil map is a no-op)
+	for key, value := range a.serviceConfig.FleetAgentConfig.Labels {
+		labels[key] = value
+	}
+ a.logger.Info("Looking up Secret with KubeConfig for given Shoot.", "namespace", namespace, "secretName", KubeconfigSecretName)
+
+ if err := a.client.Get(ctx, kutil.Key(namespace, KubeconfigSecretName), secret); err == nil {
+ secretData := make(map[string][]byte)
+ secretData["value"] = secret.Data[KubeconfigKey]
+		a.logger.Info("Loaded kubeconfig from secret", "secretName", secret.Name, "namespace", namespace)
+
+ const fleetRegisterNamespace = "clusters"
+ kubeconfigSecret := corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "kubecfg-" + cluster.Shoot.Name,
+ Namespace: fleetRegisterNamespace,
+ },
+ Data: secretData,
+ }
+
+ clusterRegistration := fleetv1alpha1.Cluster{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: cluster.Shoot.Name,
+ Namespace: fleetRegisterNamespace,
+ Labels: labels,
+ },
+ Spec: fleetv1alpha1.ClusterSpec{
+ KubeConfigSecret: "kubecfg-" + cluster.Shoot.Name,
+ },
+ }
+ a.logger.Info("Creating kubeconfig secret for Fleet registration.")
+ if _, err = a.fleetManager.CreateKubeconfigSecret(ctx, &kubeconfigSecret); err != nil {
+ a.logger.Error(err, "Failed to create secret with kubeconfig for Fleet registration")
+ }
+ a.logger.Info("Creating Cluster registration for Fleet registration.")
+ if _, err = a.fleetManager.CreateCluster(ctx, &clusterRegistration); err != nil {
+ a.logger.Error(err, "Failed to create Cluster for Fleet registration")
+ }
+		a.logger.Info("Registered shoot cluster in Fleet Manager", "registration", clusterRegistration)
+ } else {
+ a.logger.Error(err, "Failed to find Secret with kubeconfig for Fleet registration.")
+ }
+}
+
+func (a *actuator) updateStatus(ctx context.Context, ex *extensionsv1alpha1.Extension) error {
+ return controller.TryUpdateStatus(ctx, retry.DefaultBackoff, a.client, ex, func() error {
+ return nil
+ })
+}
\ No newline at end of file
diff --git a/pkg/controller/add.go b/pkg/controller/add.go
new file mode 100644
index 0000000..70ec3e1
--- /dev/null
+++ b/pkg/controller/add.go
@@ -0,0 +1,66 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+ controllerconfig "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller/config"
+
+ "github.com/gardener/gardener/extensions/pkg/controller/extension"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+const (
+ // Type is the type of Extension resource.
+ Type = "shoot-fleet-agent"
+ // ControllerName is the name of the shoot fleet agent service controller.
+ ControllerName = "shoot_fleet_agent"
+ // FinalizerSuffix is the finalizer suffix for the shoot fleet agent service controller.
+ FinalizerSuffix = "shoot-fleet-agent"
+)
+
+var (
+ // DefaultAddOptions are the default AddOptions for AddToManager.
+ DefaultAddOptions = AddOptions{}
+)
+
+// AddOptions are options to apply when adding the shoot fleet agent service controller to the manager.
+type AddOptions struct {
+ // ControllerOptions contains options for the controller.
+ ControllerOptions controller.Options
+ // ServiceConfig contains configuration for the shoot fleet agent service.
+ ServiceConfig controllerconfig.Config
+ // IgnoreOperationAnnotation specifies whether to ignore the operation annotation or not.
+ IgnoreOperationAnnotation bool
+}
+
+// AddToManager adds a controller with the default Options to the given Controller Manager.
+func AddToManager(mgr manager.Manager) error {
+ return AddToManagerWithOptions(mgr, DefaultAddOptions.ControllerOptions, DefaultAddOptions.ServiceConfig)
+}
+
+// AddToManagerWithOptions adds a controller with the given Options to the given manager.
+// The opts.Reconciler is being set with a newly instantiated actuator.
+func AddToManagerWithOptions(mgr manager.Manager, opts controller.Options, config controllerconfig.Config) error {
+ return extension.Add(mgr, extension.AddArgs{
+ Actuator: NewActuator(config),
+ ControllerOptions: opts,
+ Name: ControllerName,
+ FinalizerSuffix: FinalizerSuffix,
+ Resync: 0,
+ Predicates: extension.DefaultPredicates(DefaultAddOptions.IgnoreOperationAnnotation),
+ Type: Type,
+ })
+}
diff --git a/pkg/controller/config/config.go b/pkg/controller/config/config.go
new file mode 100644
index 0000000..2102019
--- /dev/null
+++ b/pkg/controller/config/config.go
@@ -0,0 +1,7 @@
+package config
+
+import "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/apis/config"
+
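+// Config holds the FleetAgentConfig used by the shoot-fleet-agent controller.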
+type Config struct {
+ config.FleetAgentConfig
+}
\ No newline at end of file
diff --git a/pkg/controller/fleetmanager.go b/pkg/controller/fleetmanager.go
new file mode 100644
index 0000000..60f1170
--- /dev/null
+++ b/pkg/controller/fleetmanager.go
@@ -0,0 +1,51 @@
+package controller
+
+import (
+ "context"
+ clientset "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/client/fleet/clientset/versioned"
+ "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+)
+
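+// FleetManager wraps the clients used to manage shoot cluster registrations in the Fleet management cluster.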
+type FleetManager struct {
+ secretClient kubernetes.Clientset
+ fleetClient clientset.Interface
+ namespace string
+}
+
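+// NewManagerForConfig creates a FleetManager for the given rest.Config that operates in the given namespace.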
+func NewManagerForConfig(c *rest.Config, namespace string) (*FleetManager, error) {
+ secretClient, err := kubernetes.NewForConfig(c)
+ if err != nil {
+ return nil, err
+ }
+
+ fleetClient, err := clientset.NewForConfig(c)
+ if err != nil {
+ return nil, err
+ }
+
+ return &FleetManager{
+ secretClient: *secretClient,
+ fleetClient: fleetClient,
+ namespace: namespace,
+ }, nil
+}
+
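+// CreateCluster creates the given Fleet Cluster resource in the configured namespace.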
+func (f *FleetManager) CreateCluster(ctx context.Context, cluster *v1alpha1.Cluster) (*v1alpha1.Cluster, error) {
+ return f.fleetClient.FleetV1alpha1().Clusters(f.namespace).Create(ctx, cluster, metav1.CreateOptions{})
+}
+
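+// UpdateCluster updates the given Fleet Cluster resource in the configured namespace.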
+func (f *FleetManager) UpdateCluster(ctx context.Context, cluster *v1alpha1.Cluster) (*v1alpha1.Cluster, error) {
+ return f.fleetClient.FleetV1alpha1().Clusters(f.namespace).Update(ctx, cluster, metav1.UpdateOptions{})
+}
+
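+// GetCluster returns the Fleet Cluster resource with the given name from the configured namespace.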
+func (f *FleetManager) GetCluster(ctx context.Context, clusterName string) (*v1alpha1.Cluster, error) {
+ return f.fleetClient.FleetV1alpha1().Clusters(f.namespace).Get(ctx, clusterName, metav1.GetOptions{})
+}
+
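+// CreateKubeconfigSecret creates the given kubeconfig secret in the configured namespace.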
+func (f *FleetManager) CreateKubeconfigSecret(ctx context.Context, secret *corev1.Secret) (*corev1.Secret, error) {
+ return f.secretClient.CoreV1().Secrets(f.namespace).Create(ctx, secret, metav1.CreateOptions{})
+}
\ No newline at end of file
diff --git a/pkg/controller/healthcheck/add.go b/pkg/controller/healthcheck/add.go
new file mode 100644
index 0000000..bdd0062
--- /dev/null
+++ b/pkg/controller/healthcheck/add.go
@@ -0,0 +1,57 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package healthcheck
+
+import (
+ "time"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ fleetcontroller "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller"
+
+ "github.com/gardener/gardener/extensions/pkg/controller/healthcheck"
+ healthcheckconfig "github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+var (
+ defaultSyncPeriod = time.Second * 30
+ // DefaultAddOptions are the default DefaultAddArgs for AddToManager.
+ DefaultAddOptions = healthcheck.DefaultAddArgs{
+ HealthCheckConfig: healthcheckconfig.HealthCheckConfig{SyncPeriod: metav1.Duration{Duration: defaultSyncPeriod}},
+ }
+)
+
+// RegisterHealthChecks registers health checks for each extension resource.
+// Health checks are grouped by extension (e.g. worker), extension type (e.g. aws) and health check type (e.g. SystemComponentsHealthy).
+func RegisterHealthChecks(mgr manager.Manager, opts healthcheck.DefaultAddArgs) error {
+ return healthcheck.DefaultRegistration(
+ fleetcontroller.Type,
+ extensionsv1alpha1.SchemeGroupVersion.WithKind(extensionsv1alpha1.ExtensionResource),
+ func() client.ObjectList { return &extensionsv1alpha1.ExtensionList{} },
+ func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Extension{} },
+ mgr,
+ opts,
+ nil,
+ []healthcheck.ConditionTypeToHealthCheck{},
+ )
+}
+
+// AddToManager adds a controller with the default Options.
+func AddToManager(mgr manager.Manager) error {
+ return RegisterHealthChecks(mgr, DefaultAddOptions)
+}
diff --git a/pkg/controller/utils.go b/pkg/controller/utils.go
new file mode 100644
index 0000000..422bc4b
--- /dev/null
+++ b/pkg/controller/utils.go
@@ -0,0 +1,24 @@
+package controller
+
+import (
+ "io/ioutil"
+ "os"
+)
+
+// writeKubeconfigToTempFile writes the given kubeconfig bytes to a temporary file and returns the file path.
+func writeKubeconfigToTempFile(kubeconfig []byte) (string, error) {
+ tmpFile, err := ioutil.TempFile(os.TempDir(), "kubeconfig-")
+ if err != nil {
+ return "", err
+ }
+
+ if _, err = tmpFile.Write(kubeconfig); err != nil {
+ return "", err
+ }
+
+ // Close the file
+ if err := tmpFile.Close(); err != nil {
+ return "", err
+ }
+ return tmpFile.Name(), nil
+}
+
diff --git a/pkg/imagevector/imagevector.go b/pkg/imagevector/imagevector.go
new file mode 100644
index 0000000..b19ff41
--- /dev/null
+++ b/pkg/imagevector/imagevector.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Enables image vector override functionality.
+//go:generate packr2
+
+package imagevector
+
+import (
+ "strings"
+
+ "github.com/gardener/gardener/pkg/utils/imagevector"
+ "github.com/gobuffalo/packr/v2"
+ "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var imageVector imagevector.ImageVector
+
+func init() {
+ box := packr.New("charts", "../../charts")
+
+ imagesYaml, err := box.FindString("images.yaml")
+ runtime.Must(err)
+
+ imageVector, err = imagevector.Read(strings.NewReader(imagesYaml))
+ runtime.Must(err)
+
+ imageVector, err = imagevector.WithEnvOverride(imageVector)
+ runtime.Must(err)
+}
+
+// ImageVector is the image vector that contains all the needed images.
+func ImageVector() imagevector.ImageVector {
+ return imageVector
+}
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
new file mode 100644
index 0000000..0cd3800
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/.gitignore
@@ -0,0 +1,5 @@
+TAGS
+tags
+.*.swp
+tomlcheck/tomlcheck
+toml.test
diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml
new file mode 100644
index 0000000..8b8afc4
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go:
+ - 1.1
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - tip
+install:
+ - go install ./...
+ - go get github.com/BurntSushi/toml-test
+script:
+ - export PATH="$PATH:$HOME/gopath/bin"
+ - make test
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
new file mode 100644
index 0000000..6efcfd0
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE
@@ -0,0 +1,3 @@
+Compatible with TOML version
+[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
+
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
new file mode 100644
index 0000000..01b5743
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COPYING
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 TOML authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
new file mode 100644
index 0000000..3600848
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/Makefile
@@ -0,0 +1,19 @@
+install:
+ go install ./...
+
+test: install
+ go test -v
+ toml-test toml-test-decoder
+ toml-test -encoder toml-test-encoder
+
+fmt:
+ gofmt -w *.go */*.go
+ colcheck *.go */*.go
+
+tags:
+ find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
+
+push:
+ git push origin master
+ git push github master
+
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
new file mode 100644
index 0000000..7c1b37e
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -0,0 +1,218 @@
+## TOML parser and encoder for Go with reflection
+
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml`
+packages. This package also supports the `encoding.TextUnmarshaler` and
+`encoding.TextMarshaler` interfaces so that you can define custom data
+representations. (There is an example of this below.)
+
+Spec: https://github.com/toml-lang/toml
+
+Compatible with TOML version
+[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
+
+Documentation: https://godoc.org/github.com/BurntSushi/toml
+
+Installation:
+
+```bash
+go get github.com/BurntSushi/toml
+```
+
+Try the toml validator:
+
+```bash
+go get github.com/BurntSushi/toml/cmd/tomlv
+tomlv some-toml-file.toml
+```
+
+[](https://travis-ci.org/BurntSushi/toml) [](https://godoc.org/github.com/BurntSushi/toml)
+
+### Testing
+
+This package passes all tests in
+[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
+and the encoder.
+
+### Examples
+
+This package works similarly to how the Go standard library handles `XML`
+and `JSON`. Namely, data is loaded into Go values via reflection.
+
+For the simplest example, consider some TOML file as just a list of keys
+and values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which could be defined in Go as:
+
+```go
+type Config struct {
+ Age int
+ Cats []string
+ Pi float64
+ Perfection []int
+ DOB time.Time // requires `import time`
+}
+```
+
+And then decoded with:
+
+```go
+var conf Config
+if _, err := toml.Decode(tomlData, &conf); err != nil {
+ // handle error
+}
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+ ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+### Using the `encoding.TextUnmarshaler` interface
+
+Here's an example that automatically parses duration strings into
+`time.Duration` values:
+
+```toml
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+```
+
+Which can be decoded with:
+
+```go
+type song struct {
+ Name string
+ Duration duration
+}
+type songs struct {
+ Song []song
+}
+var favorites songs
+if _, err := toml.Decode(blob, &favorites); err != nil {
+ log.Fatal(err)
+}
+
+for _, s := range favorites.Song {
+ fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+}
+```
+
+And you'll also need a `duration` type that satisfies the
+`encoding.TextUnmarshaler` interface:
+
+```go
+type duration struct {
+ time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+ var err error
+ d.Duration, err = time.ParseDuration(string(text))
+ return err
+}
+```
+
+### More complex usage
+
+Here's an example of how to load the example from the official spec page:
+
+```toml
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+ "alpha",
+ "omega"
+]
+```
+
+And the corresponding Go types are:
+
+```go
+type tomlConfig struct {
+ Title string
+ Owner ownerInfo
+ DB database `toml:"database"`
+ Servers map[string]server
+ Clients clients
+}
+
+type ownerInfo struct {
+ Name string
+ Org string `toml:"organization"`
+ Bio string
+ DOB time.Time
+}
+
+type database struct {
+ Server string
+ Ports []int
+ ConnMax int `toml:"connection_max"`
+ Enabled bool
+}
+
+type server struct {
+ IP string
+ DC string
+}
+
+type clients struct {
+ Data [][]interface{}
+ Hosts []string
+}
+```
+
+Note that a case insensitive match will be tried if an exact match can't be
+found.
+
+A working example of the above can be found in `_examples/example.{go,toml}`.
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
new file mode 100644
index 0000000..b0fd51d
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -0,0 +1,509 @@
+package toml
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "strings"
+ "time"
+)
+
+func e(format string, args ...interface{}) error {
+ return fmt.Errorf("toml: "+format, args...)
+}
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+ UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
+func Unmarshal(p []byte, v interface{}) error {
+ _, err := Decode(string(p), v)
+ return err
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+// When using the various `Decode*` functions, the type `Primitive` may
+// be given to any value, and its decoding will be delayed.
+//
+// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+//
+// The underlying representation of a `Primitive` value is subject to change.
+// Do not rely on it.
+//
+// N.B. Primitive values are still parsed, so using them will only avoid
+// the overhead of reflection. They can be useful when you don't know the
+// exact type of TOML data until run time.
+type Primitive struct {
+ undecoded interface{}
+ context Key
+}
+
+// DEPRECATED!
+//
+// Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md := MetaData{decoded: make(map[string]bool)}
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
+//
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md.context = primValue.context
+ defer func() { md.context = nil }()
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Decode will decode the contents of `data` in TOML format into a pointer
+// `v`.
+//
+// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
+// used interchangeably.)
+//
+// TOML arrays of tables correspond to either a slice of structs or a slice
+// of maps.
+//
+// TOML datetimes correspond to Go `time.Time` values.
+//
+// All other TOML types (float, string, int, bool and array) correspond
+// to the obvious Go types.
+//
+// An exception to the above rules is if a type implements the
+// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
+// (floats, strings, integers, booleans and datetimes) will be converted to
+// a byte string and given to the value's UnmarshalText method. See the
+// Unmarshaler example for a demonstration with time duration strings.
+//
+// Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go
+// struct. The special `toml` struct tag may be used to map TOML keys to
+// struct fields that don't match the key name exactly. (See the example.)
+// A case insensitive match to struct names will be tried if an exact match
+// can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there
+// may exist TOML values that cannot be placed into your representation, and
+// there may be parts of your representation that do not correspond to
+// TOML values. This loose mapping can be made stricter by using the IsDefined
+// and/or Undecoded methods on the MetaData returned.
+//
+// This decoder will not handle cyclic types. If a cyclic type is passed,
+// `Decode` will not terminate.
+func Decode(data string, v interface{}) (MetaData, error) {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr {
+ return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
+ }
+ if rv.IsNil() {
+ return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
+ }
+ p, err := parse(data)
+ if err != nil {
+ return MetaData{}, err
+ }
+ md := MetaData{
+ p.mapping, p.types, p.ordered,
+ make(map[string]bool, len(p.ordered)), nil,
+ }
+ return md, md.unify(p.mapping, indirect(rv))
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at `fpath` and decode it for you.
+func DecodeFile(fpath string, v interface{}) (MetaData, error) {
+ bs, err := ioutil.ReadFile(fpath)
+ if err != nil {
+ return MetaData{}, err
+ }
+ return Decode(string(bs), v)
+}
+
+// DecodeReader is just like Decode, except it will consume all bytes
+// from the reader and decode it for you.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+ bs, err := ioutil.ReadAll(r)
+ if err != nil {
+ return MetaData{}, err
+ }
+ return Decode(string(bs), v)
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+
+ // Special case. Look for a `Primitive` value.
+ if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+ // Save the undecoded data and the key context into the primitive
+ // value.
+ context := make(Key, len(md.context))
+ copy(context, md.context)
+ rv.Set(reflect.ValueOf(Primitive{
+ undecoded: data,
+ context: context,
+ }))
+ return nil
+ }
+
+ // Special case. Unmarshaler Interface support.
+ if rv.CanAddr() {
+ if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+ return v.UnmarshalTOML(data)
+ }
+ }
+
+ // Special case. Handle time.Time values specifically.
+ // TODO: Remove this code when we decide to drop support for Go 1.1.
+ // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
+ // interfaces.
+ if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
+ return md.unifyDatetime(data, rv)
+ }
+
+ // Special case. Look for a value satisfying the TextUnmarshaler interface.
+ if v, ok := rv.Interface().(TextUnmarshaler); ok {
+ return md.unifyText(data, v)
+ }
+ // BUG(burntsushi)
+ // The behavior here is incorrect whenever a Go type satisfies the
+ // encoding.TextUnmarshaler interface but also corresponds to a TOML
+ // hash or array. In particular, the unmarshaler should only be applied
+ // to primitive TOML values. But at this point, it will be applied to
+ // all kinds of values and produce an incorrect error whenever those values
+ // are hashes or arrays (including arrays of tables).
+
+ k := rv.Kind()
+
+ // Integer kinds (Int through Uint64) are contiguous in reflect.Kind, so
+ // handle signed and unsigned integers in one place.
+ if k >= reflect.Int && k <= reflect.Uint64 {
+ return md.unifyInt(data, rv)
+ }
+ switch k {
+ case reflect.Ptr:
+ elem := reflect.New(rv.Type().Elem())
+ err := md.unify(data, reflect.Indirect(elem))
+ if err != nil {
+ return err
+ }
+ rv.Set(elem)
+ return nil
+ case reflect.Struct:
+ return md.unifyStruct(data, rv)
+ case reflect.Map:
+ return md.unifyMap(data, rv)
+ case reflect.Array:
+ return md.unifyArray(data, rv)
+ case reflect.Slice:
+ return md.unifySlice(data, rv)
+ case reflect.String:
+ return md.unifyString(data, rv)
+ case reflect.Bool:
+ return md.unifyBool(data, rv)
+ case reflect.Interface:
+ // we only support empty interfaces.
+ if rv.NumMethod() > 0 {
+ return e("unsupported type %s", rv.Type())
+ }
+ return md.unifyAnything(data, rv)
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ return md.unifyFloat64(data, rv)
+ }
+ return e("unsupported type %s", rv.Kind())
+}
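
The Unmarshaler branch above hands the still-untyped value straight to the target type before any reflection-based unification. A sketch of a type taking advantage of that; the Duration wrapper and the "timeout" key are illustrative, not part of this library:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

// Duration decodes a TOML string such as "1h30m" into a time.Duration.
type Duration struct{ time.Duration }

// UnmarshalTOML is the hook checked in unify: the decoder passes the raw
// value as an interface{} (a Go string for a TOML string).
func (d *Duration) UnmarshalTOML(data interface{}) error {
	s, ok := data.(string)
	if !ok {
		return fmt.Errorf("duration: expected TOML string, got %T", data)
	}
	dur, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	d.Duration = dur
	return nil
}

func main() {
	var v struct {
		Timeout Duration `toml:"timeout"`
	}
	if _, err := toml.Decode(`timeout = "1h30m"`, &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Timeout.Duration) // 1h30m0s
}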
+
+func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ if mapping == nil {
+ return nil
+ }
+ return e("type mismatch for %s: expected table but found %T",
+ rv.Type().String(), mapping)
+ }
+
+ for key, datum := range tmap {
+ var f *field
+ fields := cachedTypeFields(rv.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if ff.name == key {
+ f = ff
+ break
+ }
+ if f == nil && strings.EqualFold(ff.name, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv := rv
+ for _, i := range f.index {
+ subv = indirect(subv.Field(i))
+ }
+ if isUnifiable(subv) {
+ md.decoded[md.context.add(key).String()] = true
+ md.context = append(md.context, key)
+ if err := md.unify(datum, subv); err != nil {
+ return err
+ }
+ md.context = md.context[0 : len(md.context)-1]
+ } else if f.name != "" {
+ // Bad user! No soup for you!
+ return e("cannot write unexported field %s.%s",
+ rv.Type().String(), f.name)
+ }
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ if tmap == nil {
+ return nil
+ }
+ return badtype("map", mapping)
+ }
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(rv.Type()))
+ }
+ for k, v := range tmap {
+ md.decoded[md.context.add(k).String()] = true
+ md.context = append(md.context, k)
+
+ rvkey := indirect(reflect.New(rv.Type().Key()))
+ rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
+ if err := md.unify(v, rvval); err != nil {
+ return err
+ }
+ md.context = md.context[0 : len(md.context)-1]
+
+ rvkey.SetString(k)
+ rv.SetMapIndex(rvkey, rvval)
+ }
+ return nil
+}
+
+func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ if !datav.IsValid() {
+ return nil
+ }
+ return badtype("slice", data)
+ }
+ sliceLen := datav.Len()
+ if sliceLen != rv.Len() {
+ return e("expected array length %d; got TOML array of length %d",
+ rv.Len(), sliceLen)
+ }
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ if !datav.IsValid() {
+ return nil
+ }
+ return badtype("slice", data)
+ }
+ n := datav.Len()
+ if rv.IsNil() || rv.Cap() < n {
+ rv.Set(reflect.MakeSlice(rv.Type(), n, n))
+ }
+ rv.SetLen(n)
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
+ sliceLen := data.Len()
+ for i := 0; i < sliceLen; i++ {
+ v := data.Index(i).Interface()
+ sliceval := indirect(rv.Index(i))
+ if err := md.unify(v, sliceval); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
+ if _, ok := data.(time.Time); ok {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+ }
+ return badtype("time.Time", data)
+}
+
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+ if s, ok := data.(string); ok {
+ rv.SetString(s)
+ return nil
+ }
+ return badtype("string", data)
+}
+
+func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(float64); ok {
+ switch rv.Kind() {
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ rv.SetFloat(num)
+ default:
+ panic("bug")
+ }
+ return nil
+ }
+ return badtype("float", data)
+}
+
+func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(int64); ok {
+ if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int64:
+ // No bounds checking necessary.
+ case reflect.Int8:
+ if num < math.MinInt8 || num > math.MaxInt8 {
+ return e("value %d is out of range for int8", num)
+ }
+ case reflect.Int16:
+ if num < math.MinInt16 || num > math.MaxInt16 {
+ return e("value %d is out of range for int16", num)
+ }
+ case reflect.Int32:
+ if num < math.MinInt32 || num > math.MaxInt32 {
+ return e("value %d is out of range for int32", num)
+ }
+ }
+ rv.SetInt(num)
+ } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
+ unum := uint64(num)
+ switch rv.Kind() {
+ case reflect.Uint, reflect.Uint64:
+ // No bounds checking necessary.
+ case reflect.Uint8:
+ if num < 0 || unum > math.MaxUint8 {
+ return e("value %d is out of range for uint8", num)
+ }
+ case reflect.Uint16:
+ if num < 0 || unum > math.MaxUint16 {
+ return e("value %d is out of range for uint16", num)
+ }
+ case reflect.Uint32:
+ if num < 0 || unum > math.MaxUint32 {
+ return e("value %d is out of range for uint32", num)
+ }
+ }
+ rv.SetUint(unum)
+ } else {
+ panic("unreachable")
+ }
+ return nil
+ }
+ return badtype("integer", data)
+}
+
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+ if b, ok := data.(bool); ok {
+ rv.SetBool(b)
+ return nil
+ }
+ return badtype("boolean", data)
+}
+
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+}
+
+func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
+ var s string
+ switch sdata := data.(type) {
+ case TextMarshaler:
+ text, err := sdata.MarshalText()
+ if err != nil {
+ return err
+ }
+ s = string(text)
+ case fmt.Stringer:
+ s = sdata.String()
+ case string:
+ s = sdata
+ case bool:
+ s = fmt.Sprintf("%v", sdata)
+ case int64:
+ s = fmt.Sprintf("%d", sdata)
+ case float64:
+ s = fmt.Sprintf("%f", sdata)
+ default:
+ return badtype("primitive (string-like)", data)
+ }
+ if err := v.UnmarshalText([]byte(s)); err != nil {
+ return err
+ }
+ return nil
+}
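
Because unifyText accepts any value whose type implements TextUnmarshaler, standard-library types such as net.IP decode directly from TOML strings; a small sketch with an invented "bind" key:

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/BurntSushi/toml"
)

type listener struct {
	// *net.IP implements encoding.TextUnmarshaler, so unifyText feeds the
	// TOML string "10.0.0.1" to its UnmarshalText method.
	Bind net.IP `toml:"bind"`
}

func main() {
	var l listener
	if _, err := toml.Decode(`bind = "10.0.0.1"`, &l); err != nil {
		log.Fatal(err)
	}
	fmt.Println(l.Bind) // 10.0.0.1
}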
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+ return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+// Pointers are followed until the value is not a pointer.
+// New values are allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of
+// interest to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+ if v.Kind() != reflect.Ptr {
+ if v.CanSet() {
+ pv := v.Addr()
+ if _, ok := pv.Interface().(TextUnmarshaler); ok {
+ return pv
+ }
+ }
+ return v
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+ if rv.CanSet() {
+ return true
+ }
+ if _, ok := rv.Interface().(TextUnmarshaler); ok {
+ return true
+ }
+ return false
+}
+
+func badtype(expected string, data interface{}) error {
+ return e("cannot load TOML value of type %T into a Go %s", data, expected)
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
new file mode 100644
index 0000000..b9914a6
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -0,0 +1,121 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not
+// be inferrable via reflection. In particular, whether a key has been defined
+// and the TOML type of a key.
+type MetaData struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ keys []Key
+ decoded map[string]bool
+ context Key // Used only during decoding.
+}
+
+// IsDefined returns true if the key given exists in the TOML data. The key
+// should be specified hierarchically, e.g.,
+//
+// // access the TOML key 'a.b.c'
+// IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key is given. Keys are case sensitive.
+func (md *MetaData) IsDefined(key ...string) bool {
+ if len(key) == 0 {
+ return false
+ }
+
+ var hash map[string]interface{}
+ var ok bool
+ var hashOrVal interface{} = md.mapping
+ for _, k := range key {
+ if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+ return false
+ }
+ if hashOrVal, ok = hash[k]; !ok {
+ return false
+ }
+ }
+ return true
+}
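
A short sketch of IsDefined against a nested document (table and key names are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	const doc = `
[server.tls]
cert = "/etc/ssl/cert.pem"
`
	var v map[string]interface{}
	md, err := toml.Decode(doc, &v)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md.IsDefined("server", "tls", "cert")) // true
	fmt.Println(md.IsDefined("server", "tls", "key"))  // false
	fmt.Println(md.IsDefined())                        // false: empty key
}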
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that
+// does not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+ fullkey := strings.Join(key, ".")
+ if typ, ok := md.types[fullkey]; ok {
+ return typ.typeString()
+ }
+ return ""
+}
+
+// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
+// to get values of this type.
+type Key []string
+
+func (k Key) String() string {
+ return strings.Join(k, ".")
+}
+
+func (k Key) maybeQuotedAll() string {
+ var ss []string
+ for i := range k {
+ ss = append(ss, k.maybeQuoted(i))
+ }
+ return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+ quote := false
+ for _, c := range k[i] {
+ if !isBareKeyChar(c) {
+ quote = true
+ break
+ }
+ }
+ if quote {
+ return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
+ }
+ return k[i]
+}
+
+func (k Key) add(piece string) Key {
+ newKey := make(Key, len(k)+1)
+ copy(newKey, k)
+ newKey[len(k)] = piece
+ return newKey
+}
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific.
+//
+// The list will have the same order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+ return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+ undecoded := make([]Key, 0, len(md.keys))
+ for _, key := range md.keys {
+ if !md.decoded[key.String()] {
+ undecoded = append(undecoded, key)
+ }
+ }
+ return undecoded
+}
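
Undecoded is the usual hook for building a "strict" decode on top of the loose mapping: reject the input when any key was left over. A sketch, with a deliberately misspelled key:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type settings struct {
	Name     string `toml:"name"`
	Replicas int    `toml:"replicas"`
}

func main() {
	const doc = `
name = "fleet"
replcas = 3   # misspelled, so it never maps to a struct field
`
	var s settings
	md, err := toml.Decode(doc, &s)
	if err != nil {
		log.Fatal(err)
	}
	if undec := md.Undecoded(); len(undec) > 0 {
		log.Fatalf("unknown keys: %v", undec)
	}
	fmt.Println(s.Name, s.Replicas)
}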
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
new file mode 100644
index 0000000..b371f39
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -0,0 +1,27 @@
+/*
+Package toml provides facilities for decoding and encoding TOML configuration
+files via reflection. There is also support for delaying decoding with
+the Primitive type, and querying the set of keys in a TOML document with the
+MetaData type.
+
+The specification implemented: https://github.com/toml-lang/toml
+
+The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
+whether a file is a valid TOML document. It can also be used to print the
+type of each key in a TOML document.
+
+Testing
+
+There are two important types of tests used for this package. The first is
+contained inside '*_test.go' files and uses the standard Go unit testing
+framework. These tests are primarily devoted to holistically testing the
+decoder and encoder.
+
+The second type of testing is used to verify the implementation's adherence
+to the TOML specification. These tests have been factored into their own
+project: https://github.com/BurntSushi/toml-test
+
+The reason the tests are in a separate project is so that they can be used by
+any implementation of TOML; the test suite itself is language agnostic.
+*/
+package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
new file mode 100644
index 0000000..d905c21
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -0,0 +1,568 @@
+package toml
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+ errArrayMixedElementTypes = errors.New(
+ "toml: cannot encode array with mixed element types")
+ errArrayNilElement = errors.New(
+ "toml: cannot encode array with nil element")
+ errNonString = errors.New(
+ "toml: cannot encode a map with non-string key type")
+ errAnonNonStruct = errors.New(
+ "toml: cannot encode an anonymous field that is not a struct")
+ errArrayNoTable = errors.New(
+ "toml: TOML array element cannot contain a table")
+ errNoKey = errors.New(
+ "toml: top-level values must be Go maps or structs")
+ errAnything = errors.New("") // used in testing
+)
+
+var quotedReplacer = strings.NewReplacer(
+ "\t", "\\t",
+ "\n", "\\n",
+ "\r", "\\r",
+ "\"", "\\\"",
+ "\\", "\\\\",
+)
+
+// Encoder controls the encoding of Go values to a TOML document to some
+// io.Writer.
+//
+// The indentation level can be controlled with the Indent field.
+type Encoder struct {
+ // A single indentation level. By default it is two spaces.
+ Indent string
+
+ // hasWritten is whether we have written any output to w yet.
+ hasWritten bool
+ w *bufio.Writer
+}
+
+// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
+// given. By default, a single indentation level is 2 spaces.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: bufio.NewWriter(w),
+ Indent: " ",
+ }
+}
+
+// Encode writes a TOML representation of the Go value to the underlying
+// io.Writer. If the value given cannot be encoded to a valid TOML document,
+// then an error is returned.
+//
+// The mapping between Go values and TOML values should be precisely the same
+// as for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. (If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.)
+//
+// When encoding TOML hashes (i.e., Go maps or structs), keys without any
+// sub-hashes are encoded first.
+//
+// If a Go map is encoded, then its keys are sorted alphabetically for
+// deterministic output. More control over this behavior may be provided if
+// there is demand for it.
+//
+// Encoding Go values without a corresponding TOML representation---like map
+// types with non-string keys---will cause an error to be returned. Similarly
+// for mixed arrays/slices, arrays/slices with nil elements, embedded
+// non-struct types and nested slices containing maps or structs.
+// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
+// and so is []map[string][]string.)
+func (enc *Encoder) Encode(v interface{}) error {
+ rv := eindirect(reflect.ValueOf(v))
+ if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+ return err
+ }
+ return enc.w.Flush()
+}
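
A sketch of the encoding direction with the default two-space indent; any io.Writer works, a bytes.Buffer is used here only to capture the output, and the types are invented:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type database struct {
	Server string `toml:"server"`
	Ports  []int  `toml:"ports"`
}

type config struct {
	Title string   `toml:"title"`
	DB    database `toml:"database"`
}

func main() {
	cfg := config{
		Title: "example",
		DB:    database{Server: "192.168.1.1", Ports: []int{8001, 8002}},
	}
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// Output, roughly:
	//   title = "example"
	//
	//   [database]
	//     server = "192.168.1.1"
	//     ports = [8001, 8002]
}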
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if terr, ok := r.(tomlEncodeError); ok {
+ err = terr.error
+ return
+ }
+ panic(r)
+ }
+ }()
+ enc.encode(key, rv)
+ return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+ // Special case. Time needs to be in ISO8601 format.
+ // Special case. If we can marshal the type to text, then we use that.
+ // Basically, this prevents the encoder from handling these types as
+ // generic structs (or whatever the underlying type of a TextMarshaler is).
+ switch rv.Interface().(type) {
+ case time.Time, TextMarshaler:
+ enc.keyEqElement(key, rv)
+ return
+ }
+
+ k := rv.Kind()
+ switch k {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64,
+ reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+ enc.keyEqElement(key, rv)
+ case reflect.Array, reflect.Slice:
+ if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+ enc.eArrayOfTables(key, rv)
+ } else {
+ enc.keyEqElement(key, rv)
+ }
+ case reflect.Interface:
+ if rv.IsNil() {
+ return
+ }
+ enc.encode(key, rv.Elem())
+ case reflect.Map:
+ if rv.IsNil() {
+ return
+ }
+ enc.eTable(key, rv)
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return
+ }
+ enc.encode(key, rv.Elem())
+ case reflect.Struct:
+ enc.eTable(key, rv)
+ default:
+ panic(e("unsupported type for key '%s': %s", key, k))
+ }
+}
+
+// eElement encodes any value that can be an array element (primitives and
+// arrays).
+func (enc *Encoder) eElement(rv reflect.Value) {
+ switch v := rv.Interface().(type) {
+ case time.Time:
+ // Special case time.Time as a primitive. Has to come before
+ // TextMarshaler below because time.Time implements
+ // encoding.TextMarshaler, but we need to always use UTC.
+ enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
+ return
+ case TextMarshaler:
+ // Special case. Use text marshaler if it's available for this value.
+ if s, err := v.MarshalText(); err != nil {
+ encPanic(err)
+ } else {
+ enc.writeQuoted(string(s))
+ }
+ return
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ enc.wf(strconv.FormatBool(rv.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64:
+ enc.wf(strconv.FormatInt(rv.Int(), 10))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16,
+ reflect.Uint32, reflect.Uint64:
+ enc.wf(strconv.FormatUint(rv.Uint(), 10))
+ case reflect.Float32:
+ enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
+ case reflect.Float64:
+ enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
+ case reflect.Array, reflect.Slice:
+ enc.eArrayOrSliceElement(rv)
+ case reflect.Interface:
+ enc.eElement(rv.Elem())
+ case reflect.String:
+ enc.writeQuoted(rv.String())
+ default:
+ panic(e("unexpected primitive type: %s", rv.Kind()))
+ }
+}
+
+// By the TOML spec, all floats must have a decimal point with at least one
+// digit on either side.
+func floatAddDecimal(fstr string) string {
+ if !strings.Contains(fstr, ".") {
+ return fstr + ".0"
+ }
+ return fstr
+}
+
+func (enc *Encoder) writeQuoted(s string) {
+ enc.wf("\"%s\"", quotedReplacer.Replace(s))
+}
+
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+ length := rv.Len()
+ enc.wf("[")
+ for i := 0; i < length; i++ {
+ elem := rv.Index(i)
+ enc.eElement(elem)
+ if i != length-1 {
+ enc.wf(", ")
+ }
+ }
+ enc.wf("]")
+}
+
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+ if len(key) == 0 {
+ encPanic(errNoKey)
+ }
+ for i := 0; i < rv.Len(); i++ {
+ trv := rv.Index(i)
+ if isNil(trv) {
+ continue
+ }
+ panicIfInvalidKey(key)
+ enc.newline()
+ enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.newline()
+ enc.eMapOrStruct(key, trv)
+ }
+}
+
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+ panicIfInvalidKey(key)
+ if len(key) == 1 {
+ // Output an extra newline between top-level tables.
+ // (The newline isn't written if nothing else has been written though.)
+ enc.newline()
+ }
+ if len(key) > 0 {
+ enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.newline()
+ }
+ enc.eMapOrStruct(key, rv)
+}
+
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
+ switch rv := eindirect(rv); rv.Kind() {
+ case reflect.Map:
+ enc.eMap(key, rv)
+ case reflect.Struct:
+ enc.eStruct(key, rv)
+ default:
+ panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+ }
+}
+
+func (enc *Encoder) eMap(key Key, rv reflect.Value) {
+ rt := rv.Type()
+ if rt.Key().Kind() != reflect.String {
+ encPanic(errNonString)
+ }
+
+ // Sort keys so that we have deterministic output. And write keys directly
+ // underneath this key first, before writing sub-structs or sub-maps.
+ var mapKeysDirect, mapKeysSub []string
+ for _, mapKey := range rv.MapKeys() {
+ k := mapKey.String()
+ if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+ mapKeysSub = append(mapKeysSub, k)
+ } else {
+ mapKeysDirect = append(mapKeysDirect, k)
+ }
+ }
+
+ var writeMapKeys = func(mapKeys []string) {
+ sort.Strings(mapKeys)
+ for _, mapKey := range mapKeys {
+ mrv := rv.MapIndex(reflect.ValueOf(mapKey))
+ if isNil(mrv) {
+ // Don't write anything for nil fields.
+ continue
+ }
+ enc.encode(key.add(mapKey), mrv)
+ }
+ }
+ writeMapKeys(mapKeysDirect)
+ writeMapKeys(mapKeysSub)
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+ // Write keys for fields directly under this key first, because if we write
+ // a field that creates a new table, then all keys under it will be in that
+ // table (not the one we're writing here).
+ rt := rv.Type()
+ var fieldsDirect, fieldsSub [][]int
+ var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+ addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i)
+ // skip unexported fields
+ if f.PkgPath != "" && !f.Anonymous {
+ continue
+ }
+ frv := rv.Field(i)
+ if f.Anonymous {
+ t := f.Type
+ switch t.Kind() {
+ case reflect.Struct:
+ // Treat anonymous struct fields with
+ // tag names as though they are not
+ // anonymous, like encoding/json does.
+ if getOptions(f.Tag).name == "" {
+ addFields(t, frv, f.Index)
+ continue
+ }
+ case reflect.Ptr:
+ if t.Elem().Kind() == reflect.Struct &&
+ getOptions(f.Tag).name == "" {
+ if !frv.IsNil() {
+ addFields(t.Elem(), frv.Elem(), f.Index)
+ }
+ continue
+ }
+ // Fall through to the normal field encoding logic below
+ // for non-struct anonymous fields.
+ }
+ }
+
+ if typeIsHash(tomlTypeOfGo(frv)) {
+ fieldsSub = append(fieldsSub, append(start, f.Index...))
+ } else {
+ fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+ }
+ }
+ }
+ addFields(rt, rv, nil)
+
+ var writeFields = func(fields [][]int) {
+ for _, fieldIndex := range fields {
+ sft := rt.FieldByIndex(fieldIndex)
+ sf := rv.FieldByIndex(fieldIndex)
+ if isNil(sf) {
+ // Don't write anything for nil fields.
+ continue
+ }
+
+ opts := getOptions(sft.Tag)
+ if opts.skip {
+ continue
+ }
+ keyName := sft.Name
+ if opts.name != "" {
+ keyName = opts.name
+ }
+ if opts.omitempty && isEmpty(sf) {
+ continue
+ }
+ if opts.omitzero && isZero(sf) {
+ continue
+ }
+
+ enc.encode(key.add(keyName), sf)
+ }
+ }
+ writeFields(fieldsDirect)
+ writeFields(fieldsSub)
+}
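
As the anonymous-field handling above suggests, an untagged embedded struct is flattened into the enclosing table, mirroring encoding/json; a sketch with invented types:

package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml"
)

type Meta struct {
	CreatedBy string `toml:"created_by"`
}

type Bundle struct {
	Meta        // embedded and untagged: created_by is promoted to the top level
	Name string `toml:"name"`
}

func main() {
	b := Bundle{Meta: Meta{CreatedBy: "fleet"}, Name: "demo"}
	if err := toml.NewEncoder(os.Stdout).Encode(b); err != nil {
		log.Fatal(err)
	}
	// created_by = "fleet"
	// name = "demo"
}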
+
+// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
+// which means no concrete TOML type could be found. It is also used to
+// determine whether the types of array elements are mixed (which is
+// forbidden); a nil Go value is illegal as an array element.
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+ if isNil(rv) || !rv.IsValid() {
+ return nil
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ return tomlBool
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64:
+ return tomlInteger
+ case reflect.Float32, reflect.Float64:
+ return tomlFloat
+ case reflect.Array, reflect.Slice:
+ if typeEqual(tomlHash, tomlArrayType(rv)) {
+ return tomlArrayHash
+ }
+ return tomlArray
+ case reflect.Ptr, reflect.Interface:
+ return tomlTypeOfGo(rv.Elem())
+ case reflect.String:
+ return tomlString
+ case reflect.Map:
+ return tomlHash
+ case reflect.Struct:
+ switch rv.Interface().(type) {
+ case time.Time:
+ return tomlDatetime
+ case TextMarshaler:
+ return tomlString
+ default:
+ return tomlHash
+ }
+ default:
+ panic("unexpected reflect.Kind: " + rv.Kind().String())
+ }
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero-length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+ if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+ return nil
+ }
+ firstType := tomlTypeOfGo(rv.Index(0))
+ if firstType == nil {
+ encPanic(errArrayNilElement)
+ }
+
+ rvlen := rv.Len()
+ for i := 1; i < rvlen; i++ {
+ elem := rv.Index(i)
+ switch elemType := tomlTypeOfGo(elem); {
+ case elemType == nil:
+ encPanic(errArrayNilElement)
+ case !typeEqual(firstType, elemType):
+ encPanic(errArrayMixedElementTypes)
+ }
+ }
+ // If we have a nested array, then we must make sure that the nested
+ // array contains ONLY primitives.
+ // This checks arbitrarily nested arrays.
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
+ nest := tomlArrayType(eindirect(rv.Index(0)))
+ if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
+ encPanic(errArrayNoTable)
+ }
+ }
+ return firstType
+}
+
+type tagOptions struct {
+ skip bool // "-"
+ name string
+ omitempty bool
+ omitzero bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+ t := tag.Get("toml")
+ if t == "-" {
+ return tagOptions{skip: true}
+ }
+ var opts tagOptions
+ parts := strings.Split(t, ",")
+ opts.name = parts[0]
+ for _, s := range parts[1:] {
+ switch s {
+ case "omitempty":
+ opts.omitempty = true
+ case "omitzero":
+ opts.omitzero = true
+ }
+ }
+ return opts
+}
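
The tag grammar parsed here mirrors encoding/json: the first comma-separated part renames the key, "-" skips the field, and omitempty/omitzero suppress empty or zero values. A sketch of the options in use (struct and values are illustrative):

package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml"
)

type release struct {
	Name     string `toml:"name"`             // renamed key
	Internal string `toml:"-"`                // never encoded
	Notes    string `toml:"notes,omitempty"`  // dropped when ""
	Retries  int    `toml:"retries,omitzero"` // dropped when 0
}

func main() {
	r := release{Name: "v1.0.0", Internal: "scratch"}
	// Only `name = "v1.0.0"` is written: Internal is skipped and the other
	// two fields are suppressed by omitempty/omitzero.
	if err := toml.NewEncoder(os.Stdout).Encode(r); err != nil {
		log.Fatal(err)
	}
}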
+
+func isZero(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return rv.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return rv.Float() == 0.0
+ }
+ return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+ return rv.Len() == 0
+ case reflect.Bool:
+ return !rv.Bool()
+ }
+ return false
+}
+
+func (enc *Encoder) newline() {
+ if enc.hasWritten {
+ enc.wf("\n")
+ }
+}
+
+func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
+ if len(key) == 0 {
+ encPanic(errNoKey)
+ }
+ panicIfInvalidKey(key)
+ enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+ enc.eElement(val)
+ enc.newline()
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+ if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+ encPanic(err)
+ }
+ enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+ return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+ panic(tomlEncodeError{err})
+}
+
+func eindirect(v reflect.Value) reflect.Value {
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ return eindirect(v.Elem())
+ default:
+ return v
+ }
+}
+
+func isNil(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return rv.IsNil()
+ default:
+ return false
+ }
+}
+
+func panicIfInvalidKey(key Key) {
+ for _, k := range key {
+ if len(k) == 0 {
+ encPanic(e("Key '%s' is not a valid table name. Key names "+
+ "cannot be empty.", key.maybeQuotedAll()))
+ }
+ }
+}
+
+func isValidKeyName(s string) bool {
+ return len(s) != 0
+}
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
new file mode 100644
index 0000000..d36e1dd
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encoding_types.go
@@ -0,0 +1,19 @@
+// +build go1.2
+
+package toml
+
+// In order to support Go 1.1, we define our own TextMarshaler and
+// TextUnmarshaler types. For Go 1.2+, we just alias them with the
+// standard library interfaces.
+
+import (
+ "encoding"
+)
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler encoding.TextMarshaler
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
new file mode 100644
index 0000000..e8d503d
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
@@ -0,0 +1,18 @@
+// +build !go1.2
+
+package toml
+
+// These interfaces were introduced in Go 1.2, so we add them manually when
+// compiling for Go 1.1.
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler interface {
+ MarshalText() (text []byte, err error)
+}
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler interface {
+ UnmarshalText(text []byte) error
+}
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
new file mode 100644
index 0000000..e0a742a
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -0,0 +1,953 @@
+package toml
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type itemType int
+
+const (
+ itemError itemType = iota
+ itemNIL // used in the parser to indicate no type
+ itemEOF
+ itemText
+ itemString
+ itemRawString
+ itemMultilineString
+ itemRawMultilineString
+ itemBool
+ itemInteger
+ itemFloat
+ itemDatetime
+ itemArray // the start of an array
+ itemArrayEnd
+ itemTableStart
+ itemTableEnd
+ itemArrayTableStart
+ itemArrayTableEnd
+ itemKeyStart
+ itemCommentStart
+ itemInlineTableStart
+ itemInlineTableEnd
+)
+
+const (
+ eof = 0
+ comma = ','
+ tableStart = '['
+ tableEnd = ']'
+ arrayTableStart = '['
+ arrayTableEnd = ']'
+ tableSep = '.'
+ keySep = '='
+ arrayStart = '['
+ arrayEnd = ']'
+ commentStart = '#'
+ stringStart = '"'
+ stringEnd = '"'
+ rawStringStart = '\''
+ rawStringEnd = '\''
+ inlineTableStart = '{'
+ inlineTableEnd = '}'
+)
+
+type stateFn func(lx *lexer) stateFn
+
+type lexer struct {
+ input string
+ start int
+ pos int
+ line int
+ state stateFn
+ items chan item
+
+ // Allow backing up by up to three runes. This is necessary because
+ // TOML contains 3-rune tokens (""" and ''').
+ prevWidths [3]int
+ nprev int // how many of prevWidths are in use
+ // If we emit an eof, we can still back up, but it is not OK to call
+ // next again.
+ atEOF bool
+
+ // A stack of state functions used to maintain context.
+ // The idea is to reuse parts of the state machine in various places.
+ // For example, values can appear at the top level or within arbitrarily
+ // nested arrays. The last state on the stack is used after a value has
+ // been lexed. Similarly for comments.
+ stack []stateFn
+}
+
+type item struct {
+ typ itemType
+ val string
+ line int
+}
+
+func (lx *lexer) nextItem() item {
+ for {
+ select {
+ case item := <-lx.items:
+ return item
+ default:
+ lx.state = lx.state(lx)
+ }
+ }
+}
+
+func lex(input string) *lexer {
+ lx := &lexer{
+ input: input,
+ state: lexTop,
+ line: 1,
+ items: make(chan item, 10),
+ stack: make([]stateFn, 0, 10),
+ }
+ return lx
+}
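
These identifiers are unexported, so the loop below is only a debugging sketch that would live inside package toml; it shows the producer/consumer contract: each nextItem call runs state functions until one emits an item, and the stack lets value lexing resume whatever state pushed it.

package toml

import "fmt"

// dumpItems drains the lexer for a given input, printing one line per item.
// Purely illustrative; not part of the vendored package. The item type is
// printed numerically because itemType.String does not cover every kind.
func dumpItems(input string) {
	lx := lex(input)
	for {
		it := lx.nextItem()
		fmt.Printf("line %d: type %d %q\n", it.line, it.typ, it.val)
		if it.typ == itemEOF || it.typ == itemError {
			return
		}
	}
}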
+
+func (lx *lexer) push(state stateFn) {
+ lx.stack = append(lx.stack, state)
+}
+
+func (lx *lexer) pop() stateFn {
+ if len(lx.stack) == 0 {
+ return lx.errorf("BUG in lexer: no states to pop")
+ }
+ last := lx.stack[len(lx.stack)-1]
+ lx.stack = lx.stack[0 : len(lx.stack)-1]
+ return last
+}
+
+func (lx *lexer) current() string {
+ return lx.input[lx.start:lx.pos]
+}
+
+func (lx *lexer) emit(typ itemType) {
+ lx.items <- item{typ, lx.current(), lx.line}
+ lx.start = lx.pos
+}
+
+func (lx *lexer) emitTrim(typ itemType) {
+ lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+ lx.start = lx.pos
+}
+
+func (lx *lexer) next() (r rune) {
+ if lx.atEOF {
+ panic("next called after EOF")
+ }
+ if lx.pos >= len(lx.input) {
+ lx.atEOF = true
+ return eof
+ }
+
+ if lx.input[lx.pos] == '\n' {
+ lx.line++
+ }
+ lx.prevWidths[2] = lx.prevWidths[1]
+ lx.prevWidths[1] = lx.prevWidths[0]
+ if lx.nprev < 3 {
+ lx.nprev++
+ }
+ r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+ lx.prevWidths[0] = w
+ lx.pos += w
+ return r
+}
+
+// ignore skips over the pending input before this point.
+func (lx *lexer) ignore() {
+ lx.start = lx.pos
+}
+
+// backup steps back one rune. Can be called up to three times between calls
+// to next (the lexer records the widths of the last three runes).
+func (lx *lexer) backup() {
+ if lx.atEOF {
+ lx.atEOF = false
+ return
+ }
+ if lx.nprev < 1 {
+ panic("backed up too far")
+ }
+ w := lx.prevWidths[0]
+ lx.prevWidths[0] = lx.prevWidths[1]
+ lx.prevWidths[1] = lx.prevWidths[2]
+ lx.nprev--
+ lx.pos -= w
+ if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
+ lx.line--
+ }
+}
+
+// accept consumes the next rune if it's equal to `valid`.
+func (lx *lexer) accept(valid rune) bool {
+ if lx.next() == valid {
+ return true
+ }
+ lx.backup()
+ return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+ r := lx.next()
+ lx.backup()
+ return r
+}
+
+// skip ignores all input that matches the given predicate.
+func (lx *lexer) skip(pred func(rune) bool) {
+ for {
+ r := lx.next()
+ if pred(r) {
+ continue
+ }
+ lx.backup()
+ lx.ignore()
+ return
+ }
+}
+
+// errorf stops all lexing by emitting an error and returning `nil`.
+// Note that any value that is a character is escaped if it's a special
+// character (newlines, tabs, etc.).
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+ lx.items <- item{
+ itemError,
+ fmt.Sprintf(format, values...),
+ lx.line,
+ }
+ return nil
+}
+
+// lexTop consumes elements at the top level of TOML data.
+func lexTop(lx *lexer) stateFn {
+ r := lx.next()
+ if isWhitespace(r) || isNL(r) {
+ return lexSkip(lx, lexTop)
+ }
+ switch r {
+ case commentStart:
+ lx.push(lexTop)
+ return lexCommentStart
+ case tableStart:
+ return lexTableStart
+ case eof:
+ if lx.pos > lx.start {
+ return lx.errorf("unexpected EOF")
+ }
+ lx.emit(itemEOF)
+ return nil
+ }
+
+ // At this point, the only valid item can be a key, so we back up
+ // and let the key lexer do the rest.
+ lx.backup()
+ lx.push(lexTopEnd)
+ return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a newline. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == commentStart:
+ // a comment will read to a newline for us.
+ lx.push(lexTop)
+ return lexCommentStart
+ case isWhitespace(r):
+ return lexTopEnd
+ case isNL(r):
+ lx.ignore()
+ return lexTop
+ case r == eof:
+ lx.emit(itemEOF)
+ return nil
+ }
+ return lx.errorf("expected a top-level item to end with a newline, "+
+ "comment, or EOF, but got %q instead", r)
+}
+
+// lexTable lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+ if lx.peek() == arrayTableStart {
+ lx.next()
+ lx.emit(itemArrayTableStart)
+ lx.push(lexArrayTableEnd)
+ } else {
+ lx.emit(itemTableStart)
+ lx.push(lexTableEnd)
+ }
+ return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+ lx.emit(itemTableEnd)
+ return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+ if r := lx.next(); r != arrayTableEnd {
+ return lx.errorf("expected end of table array name delimiter %q, "+
+ "but got %q instead", arrayTableEnd, r)
+ }
+ lx.emit(itemArrayTableEnd)
+ return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.peek(); {
+ case r == tableEnd || r == eof:
+ return lx.errorf("unexpected end of table name " +
+ "(table names cannot be empty)")
+ case r == tableSep:
+ return lx.errorf("unexpected table separator " +
+ "(table names cannot be empty)")
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.push(lexTableNameEnd)
+ return lexValue // reuse string lexing
+ default:
+ return lexBareTableName
+ }
+}
+
+// lexBareTableName lexes the name of a table. It assumes that at least one
+// valid character for the table has already been read.
+func lexBareTableName(lx *lexer) stateFn {
+ r := lx.next()
+ if isBareKeyChar(r) {
+ return lexBareTableName
+ }
+ lx.backup()
+ lx.emit(itemText)
+ return lexTableNameEnd
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.next(); {
+ case isWhitespace(r):
+ return lexTableNameEnd
+ case r == tableSep:
+ lx.ignore()
+ return lexTableNameStart
+ case r == tableEnd:
+ return lx.pop()
+ default:
+ return lx.errorf("expected '.' or ']' to end table name, "+
+ "but got %q instead", r)
+ }
+}
+
+// lexKeyStart consumes a key name up until the first non-whitespace character.
+// lexKeyStart will ignore whitespace.
+func lexKeyStart(lx *lexer) stateFn {
+ r := lx.peek()
+ switch {
+ case r == keySep:
+ return lx.errorf("unexpected key separator %q", keySep)
+ case isWhitespace(r) || isNL(r):
+ lx.next()
+ return lexSkip(lx, lexKeyStart)
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.emit(itemKeyStart)
+ lx.push(lexKeyEnd)
+ return lexValue // reuse string lexing
+ default:
+ lx.ignore()
+ lx.emit(itemKeyStart)
+ return lexBareKey
+ }
+}
+
+// lexBareKey consumes the text of a bare key. Assumes that the first character
+// (which is not whitespace) has not yet been consumed.
+func lexBareKey(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case isBareKeyChar(r):
+ return lexBareKey
+ case isWhitespace(r):
+ lx.backup()
+ lx.emit(itemText)
+ return lexKeyEnd
+ case r == keySep:
+ lx.backup()
+ lx.emit(itemText)
+ return lexKeyEnd
+ default:
+ return lx.errorf("bare keys cannot contain %q", r)
+ }
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case r == keySep:
+ return lexSkip(lx, lexValue)
+ case isWhitespace(r):
+ return lexSkip(lx, lexKeyEnd)
+ default:
+ return lx.errorf("expected key separator %q, but got %q instead",
+ keySep, r)
+ }
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+ // We allow whitespace to precede a value, but NOT newlines.
+ // In array syntax, the array states are responsible for ignoring newlines.
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexValue)
+ case isDigit(r):
+ lx.backup() // avoid an extra state and use the same as above
+ return lexNumberOrDateStart
+ }
+ switch r {
+ case arrayStart:
+ lx.ignore()
+ lx.emit(itemArray)
+ return lexArrayValue
+ case inlineTableStart:
+ lx.ignore()
+ lx.emit(itemInlineTableStart)
+ return lexInlineTableValue
+ case stringStart:
+ if lx.accept(stringStart) {
+ if lx.accept(stringStart) {
+ lx.ignore() // Ignore """
+ return lexMultilineString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the '"'
+ return lexString
+ case rawStringStart:
+ if lx.accept(rawStringStart) {
+ if lx.accept(rawStringStart) {
+ lx.ignore() // Ignore '''
+ return lexMultilineRawString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the "'"
+ return lexRawString
+ case '+', '-':
+ return lexNumberStart
+ case '.': // special error case, be kind to users
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+ if unicode.IsLetter(r) {
+ // Be permissive here; lexBool will give a nice error if the
+ // user wrote something like
+ // x = foo
+ // (i.e. not 'true' or 'false' but is something else word-like.)
+ lx.backup()
+ return lexBool
+ }
+ return lx.errorf("expected value but found %q instead", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and newlines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValue)
+ case r == commentStart:
+ lx.push(lexArrayValue)
+ return lexCommentStart
+ case r == comma:
+ return lx.errorf("unexpected comma")
+ case r == arrayEnd:
+ // NOTE(caleb): The spec isn't clear about whether you can have
+ // a trailing comma or not, so we'll allow it.
+ return lexArrayEnd
+ }
+
+ lx.backup()
+ lx.push(lexArrayValueEnd)
+ return lexValue
+}
+
+// lexArrayValueEnd consumes everything between the end of an array value and
+// the next value (or the end of the array): it ignores whitespace and newlines
+// and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValueEnd)
+ case r == commentStart:
+ lx.push(lexArrayValueEnd)
+ return lexCommentStart
+ case r == comma:
+ lx.ignore()
+ return lexArrayValue // move on to the next value
+ case r == arrayEnd:
+ return lexArrayEnd
+ }
+ return lx.errorf(
+ "expected a comma or array terminator %q, but got %q instead",
+ arrayEnd, r,
+ )
+}
+
+// lexArrayEnd finishes the lexing of an array.
+// It assumes that a ']' has just been consumed.
+func lexArrayEnd(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemArrayEnd)
+ return lx.pop()
+}
+
+// lexInlineTableValue consumes one key/value pair in an inline table.
+// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
+func lexInlineTableValue(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexInlineTableValue)
+ case isNL(r):
+ return lx.errorf("newlines not allowed within inline tables")
+ case r == commentStart:
+ lx.push(lexInlineTableValue)
+ return lexCommentStart
+ case r == comma:
+ return lx.errorf("unexpected comma")
+ case r == inlineTableEnd:
+ return lexInlineTableEnd
+ }
+ lx.backup()
+ lx.push(lexInlineTableValueEnd)
+ return lexKeyStart
+}
+
+// lexInlineTableValueEnd consumes everything between the end of an inline table
+// key/value pair and the next pair (or the end of the table):
+// it ignores whitespace and expects either a ',' or a '}'.
+func lexInlineTableValueEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexInlineTableValueEnd)
+ case isNL(r):
+ return lx.errorf("newlines not allowed within inline tables")
+ case r == commentStart:
+ lx.push(lexInlineTableValueEnd)
+ return lexCommentStart
+ case r == comma:
+ lx.ignore()
+ return lexInlineTableValue
+ case r == inlineTableEnd:
+ return lexInlineTableEnd
+ }
+ return lx.errorf("expected a comma or an inline table terminator %q, "+
+ "but got %q instead", inlineTableEnd, r)
+}
+
+// lexInlineTableEnd finishes the lexing of an inline table.
+// It assumes that a '}' has just been consumed.
+func lexInlineTableEnd(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemInlineTableEnd)
+ return lx.pop()
+}
+
+// lexString consumes the inner contents of a string. It assumes that the
+// beginning '"' has already been consumed and ignored.
+func lexString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == eof:
+ return lx.errorf("unexpected EOF")
+ case isNL(r):
+ return lx.errorf("strings cannot contain newlines")
+ case r == '\\':
+ lx.push(lexString)
+ return lexStringEscape
+ case r == stringEnd:
+ lx.backup()
+ lx.emit(itemString)
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ return lexString
+}
+
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+ switch lx.next() {
+ case eof:
+ return lx.errorf("unexpected EOF")
+ case '\\':
+ return lexMultilineStringEscape
+ case stringEnd:
+ if lx.accept(stringEnd) {
+ if lx.accept(stringEnd) {
+ lx.backup()
+ lx.backup()
+ lx.backup()
+ lx.emit(itemMultilineString)
+ lx.next()
+ lx.next()
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ lx.backup()
+ }
+ }
+ return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == eof:
+ return lx.errorf("unexpected EOF")
+ case isNL(r):
+ return lx.errorf("strings cannot contain newlines")
+ case r == rawStringEnd:
+ lx.backup()
+ lx.emit(itemRawString)
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ return lexRawString
+}
+
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
+// a string. It assumes that the beginning "'''" has already been consumed and
+// ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+ switch lx.next() {
+ case eof:
+ return lx.errorf("unexpected EOF")
+ case rawStringEnd:
+ if lx.accept(rawStringEnd) {
+ if lx.accept(rawStringEnd) {
+ lx.backup()
+ lx.backup()
+ lx.backup()
+ lx.emit(itemRawMultilineString)
+ lx.next()
+ lx.next()
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ lx.backup()
+ }
+ }
+ return lexMultilineRawString
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+ // Handle the special case first:
+ if isNL(lx.next()) {
+ return lexMultilineString
+ }
+ lx.backup()
+ lx.push(lexMultilineString)
+ return lexStringEscape(lx)
+}
+
+func lexStringEscape(lx *lexer) stateFn {
+ r := lx.next()
+ switch r {
+ case 'b':
+ fallthrough
+ case 't':
+ fallthrough
+ case 'n':
+ fallthrough
+ case 'f':
+ fallthrough
+ case 'r':
+ fallthrough
+ case '"':
+ fallthrough
+ case '\\':
+ return lx.pop()
+ case 'u':
+ return lexShortUnicodeEscape
+ case 'U':
+ return lexLongUnicodeEscape
+ }
+ return lx.errorf("invalid escape character %q; only the following "+
+ "escape characters are allowed: "+
+ `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
+}
+
+func lexShortUnicodeEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 4; i++ {
+ r = lx.next()
+ if !isHexadecimal(r) {
+ return lx.errorf(`expected four hexadecimal digits after '\u', `+
+ "but got %q instead", lx.current())
+ }
+ }
+ return lx.pop()
+}
+
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 8; i++ {
+ r = lx.next()
+ if !isHexadecimal(r) {
+ return lx.errorf(`expected eight hexadecimal digits after '\U', `+
+ "but got %q instead", lx.current())
+ }
+ }
+ return lx.pop()
+}
+
+// lexNumberOrDateStart consumes either an integer, a float, or datetime.
+func lexNumberOrDateStart(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexNumberOrDate
+ }
+ switch r {
+ case '_':
+ return lexNumber
+ case 'e', 'E':
+ return lexFloat
+ case '.':
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+ return lx.errorf("expected a digit but got %q", r)
+}
+
+// lexNumberOrDate consumes either an integer, float or datetime.
+func lexNumberOrDate(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexNumberOrDate
+ }
+ switch r {
+ case '-':
+ return lexDatetime
+ case '_':
+ return lexNumber
+ case '.', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDatetime consumes a Datetime, to a first approximation.
+// The parser validates that it matches one of the accepted formats.
+func lexDatetime(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexDatetime
+ }
+ switch r {
+ case '-', 'T', ':', '.', 'Z', '+':
+ return lexDatetime
+ }
+
+ lx.backup()
+ lx.emit(itemDatetime)
+ return lx.pop()
+}
+
+// lexNumberStart consumes either an integer or a float. It assumes that a sign
+// has already been read, but that *no* digits have been consumed.
+// lexNumberStart will move to the appropriate integer or float states.
+func lexNumberStart(lx *lexer) stateFn {
+ // We MUST see a digit. Even floats have to start with a digit.
+ r := lx.next()
+ if !isDigit(r) {
+ if r == '.' {
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+ return lx.errorf("expected a digit but got %q", r)
+ }
+ return lexNumber
+}
+
+// lexNumber consumes an integer or a float after seeing the first digit.
+func lexNumber(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexNumber
+ }
+ switch r {
+ case '_':
+ return lexNumber
+ case '.', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexFloat
+ }
+ switch r {
+ case '_', '.', '-', '+', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemFloat)
+ return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+ var rs []rune
+ for {
+ r := lx.next()
+ if !unicode.IsLetter(r) {
+ lx.backup()
+ break
+ }
+ rs = append(rs, r)
+ }
+ s := string(rs)
+ switch s {
+ case "true", "false":
+ lx.emit(itemBool)
+ return lx.pop()
+ }
+ return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemCommentStart)
+ return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+ r := lx.peek()
+ if isNL(r) || r == eof {
+ lx.emit(itemText)
+ return lx.pop()
+ }
+ lx.next()
+ return lexComment
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+ return func(lx *lexer) stateFn {
+ lx.ignore()
+ return nextState
+ }
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
+func isWhitespace(r rune) bool {
+ return r == '\t' || r == ' '
+}
+
+func isNL(r rune) bool {
+ return r == '\n' || r == '\r'
+}
+
+func isDigit(r rune) bool {
+ return r >= '0' && r <= '9'
+}
+
+func isHexadecimal(r rune) bool {
+ return (r >= '0' && r <= '9') ||
+ (r >= 'a' && r <= 'f') ||
+ (r >= 'A' && r <= 'F')
+}
+
+func isBareKeyChar(r rune) bool {
+ return (r >= 'A' && r <= 'Z') ||
+ (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') ||
+ r == '_' ||
+ r == '-'
+}
+
+func (itype itemType) String() string {
+ switch itype {
+ case itemError:
+ return "Error"
+ case itemNIL:
+ return "NIL"
+ case itemEOF:
+ return "EOF"
+ case itemText:
+ return "Text"
+ case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+ return "String"
+ case itemBool:
+ return "Bool"
+ case itemInteger:
+ return "Integer"
+ case itemFloat:
+ return "Float"
+ case itemDatetime:
+ return "DateTime"
+ case itemTableStart:
+ return "TableStart"
+ case itemTableEnd:
+ return "TableEnd"
+ case itemKeyStart:
+ return "KeyStart"
+ case itemArray:
+ return "Array"
+ case itemArrayEnd:
+ return "ArrayEnd"
+ case itemCommentStart:
+ return "CommentStart"
+ }
+ panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+func (item item) String() string {
+ return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
new file mode 100644
index 0000000..50869ef
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -0,0 +1,592 @@
+package toml
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+type parser struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ lx *lexer
+
+ // A list of keys in the order that they appear in the TOML data.
+ ordered []Key
+
+ // the full key for the current hash in scope
+ context Key
+
+ // the base key name for everything except hashes
+ currentKey string
+
+ // rough approximation of line number
+ approxLine int
+
+ // A map of 'key.group.names' to whether they were created implicitly.
+ implicits map[string]bool
+}
+
+type parseError string
+
+func (pe parseError) Error() string {
+ return string(pe)
+}
+
+func parse(data string) (p *parser, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ var ok bool
+ if err, ok = r.(parseError); ok {
+ return
+ }
+ panic(r)
+ }
+ }()
+
+ p = &parser{
+ mapping: make(map[string]interface{}),
+ types: make(map[string]tomlType),
+ lx: lex(data),
+ ordered: make([]Key, 0),
+ implicits: make(map[string]bool),
+ }
+ for {
+ item := p.next()
+ if item.typ == itemEOF {
+ break
+ }
+ p.topLevel(item)
+ }
+
+ return p, nil
+}
+
+func (p *parser) panicf(format string, v ...interface{}) {
+ msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+ p.approxLine, p.current(), fmt.Sprintf(format, v...))
+ panic(parseError(msg))
+}
+
+func (p *parser) next() item {
+ it := p.lx.nextItem()
+ if it.typ == itemError {
+ p.panicf("%s", it.val)
+ }
+ return it
+}
+
+func (p *parser) bug(format string, v ...interface{}) {
+ panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
+}
+
+func (p *parser) expect(typ itemType) item {
+ it := p.next()
+ p.assertEqual(typ, it.typ)
+ return it
+}
+
+func (p *parser) assertEqual(expected, got itemType) {
+ if expected != got {
+ p.bug("Expected '%s' but got '%s'.", expected, got)
+ }
+}
+
+func (p *parser) topLevel(item item) {
+ switch item.typ {
+ case itemCommentStart:
+ p.approxLine = item.line
+ p.expect(itemText)
+ case itemTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemTableEnd, kg.typ)
+
+ p.establishContext(key, false)
+ p.setType("", tomlHash)
+ p.ordered = append(p.ordered, key)
+ case itemArrayTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemArrayTableEnd, kg.typ)
+
+ p.establishContext(key, true)
+ p.setType("", tomlArrayHash)
+ p.ordered = append(p.ordered, key)
+ case itemKeyStart:
+ kname := p.next()
+ p.approxLine = kname.line
+ p.currentKey = p.keyString(kname)
+
+ val, typ := p.value(p.next())
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.currentKey = ""
+ default:
+ p.bug("Unexpected type at top level: %s", item.typ)
+ }
+}
+
+// Gets a string for a key (or part of a key in a table name).
+func (p *parser) keyString(it item) string {
+ switch it.typ {
+ case itemText:
+ return it.val
+ case itemString, itemMultilineString,
+ itemRawString, itemRawMultilineString:
+ s, _ := p.value(it)
+ return s.(string)
+ default:
+ p.bug("Unexpected key type: %s", it.typ)
+ panic("unreachable")
+ }
+}
+
+// value translates an expected value from the lexer into a Go value wrapped
+// as an empty interface.
+func (p *parser) value(it item) (interface{}, tomlType) {
+ switch it.typ {
+ case itemString:
+ return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+ case itemMultilineString:
+ trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
+ return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
+ case itemRawString:
+ return it.val, p.typeOfPrimitive(it)
+ case itemRawMultilineString:
+ return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+ case itemBool:
+ switch it.val {
+ case "true":
+ return true, p.typeOfPrimitive(it)
+ case "false":
+ return false, p.typeOfPrimitive(it)
+ }
+ p.bug("Expected boolean value, but got '%s'.", it.val)
+ case itemInteger:
+ if !numUnderscoresOK(it.val) {
+ p.panicf("Invalid integer %q: underscores must be surrounded by digits",
+ it.val)
+ }
+ val := strings.Replace(it.val, "_", "", -1)
+ num, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ // Distinguish integer values. Normally, it'd be a bug if the lexer
+ // provides an invalid integer, but it's possible that the number is
+ // out of range of valid values (which the lexer cannot determine).
+ // So mark the former as a bug but the latter as a legitimate user
+ // error.
+ if e, ok := err.(*strconv.NumError); ok &&
+ e.Err == strconv.ErrRange {
+
+ p.panicf("Integer '%s' is out of the range of 64-bit "+
+ "signed integers.", it.val)
+ } else {
+ p.bug("Expected integer value, but got '%s'.", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+ case itemFloat:
+ parts := strings.FieldsFunc(it.val, func(r rune) bool {
+ switch r {
+ case '.', 'e', 'E':
+ return true
+ }
+ return false
+ })
+ for _, part := range parts {
+ if !numUnderscoresOK(part) {
+ p.panicf("Invalid float %q: underscores must be "+
+ "surrounded by digits", it.val)
+ }
+ }
+ if !numPeriodsOK(it.val) {
+ // As a special case, numbers like '123.' or '1.e2',
+ // which are valid as far as Go/strconv are concerned,
+ // must be rejected because TOML says that a fractional
+ // part consists of '.' followed by 1+ digits.
+ p.panicf("Invalid float %q: '.' must be followed "+
+ "by one or more digits", it.val)
+ }
+ val := strings.Replace(it.val, "_", "", -1)
+ num, err := strconv.ParseFloat(val, 64)
+ if err != nil {
+ if e, ok := err.(*strconv.NumError); ok &&
+ e.Err == strconv.ErrRange {
+
+ p.panicf("Float '%s' is out of the range of 64-bit "+
+ "IEEE-754 floating-point numbers.", it.val)
+ } else {
+ p.panicf("Invalid float value: %q", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+ case itemDatetime:
+ var t time.Time
+ var ok bool
+ var err error
+ for _, format := range []string{
+ "2006-01-02T15:04:05Z07:00",
+ "2006-01-02T15:04:05",
+ "2006-01-02",
+ } {
+ t, err = time.ParseInLocation(format, it.val, time.Local)
+ if err == nil {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ p.panicf("Invalid TOML Datetime: %q.", it.val)
+ }
+ return t, p.typeOfPrimitive(it)
+ case itemArray:
+ array := make([]interface{}, 0)
+ types := make([]tomlType, 0)
+
+ for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ val, typ := p.value(it)
+ array = append(array, val)
+ types = append(types, typ)
+ }
+ return array, p.typeOfArray(types)
+ case itemInlineTableStart:
+ var (
+ hash = make(map[string]interface{})
+ outerContext = p.context
+ outerKey = p.currentKey
+ )
+
+ p.context = append(p.context, p.currentKey)
+ p.currentKey = ""
+ for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
+ if it.typ != itemKeyStart {
+ p.bug("Expected key start but instead found %q, around line %d",
+ it.val, p.approxLine)
+ }
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ // retrieve key
+ k := p.next()
+ p.approxLine = k.line
+ kname := p.keyString(k)
+
+ // retrieve value
+ p.currentKey = kname
+ val, typ := p.value(p.next())
+ // make sure we keep metadata up to date
+ p.setType(kname, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ hash[kname] = val
+ }
+ p.context = outerContext
+ p.currentKey = outerKey
+ return hash, tomlHash
+ }
+ p.bug("Unexpected value type: %s", it.typ)
+ panic("unreachable")
+}
+
+// numUnderscoresOK checks whether each underscore in s is surrounded by
+// characters that are not underscores.
+func numUnderscoresOK(s string) bool {
+ accept := false
+ for _, r := range s {
+ if r == '_' {
+ if !accept {
+ return false
+ }
+ accept = false
+ continue
+ }
+ accept = true
+ }
+ return accept
+}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+ period := false
+ for _, r := range s {
+ if period && !isDigit(r) {
+ return false
+ }
+ period = r == '.'
+ }
+ return !period
+}
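+
+// Illustrative examples (not from the upstream library): together these two
+// checks enforce TOML's number formatting rules, e.g.
+//
+//    numUnderscoresOK("1_000")  == true
+//    numUnderscoresOK("_1000")  == false
+//    numPeriodsOK("3.14")       == true
+//    numPeriodsOK("123.")       == false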
+
+// establishContext sets the current context of the parser,
+// where the context is either a hash or an array of hashes. Which one is
+// set depends on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) establishContext(key Key, array bool) {
+ var ok bool
+
+ // Always start at the top level and drill down for our context.
+ hashContext := p.mapping
+ keyContext := make(Key, 0)
+
+ // We only need implicit hashes for key[0:-1]
+ for _, k := range key[0 : len(key)-1] {
+ _, ok = hashContext[k]
+ keyContext = append(keyContext, k)
+
+ // No key? Make an implicit hash and move on.
+ if !ok {
+ p.addImplicit(keyContext)
+ hashContext[k] = make(map[string]interface{})
+ }
+
+ // If the hash context is actually an array of tables, then set
+ // the hash context to the last element in that array.
+ //
+ // Otherwise, it better be a table, since this MUST be a key group (by
+ // virtue of it not being the last element in a key).
+ switch t := hashContext[k].(type) {
+ case []map[string]interface{}:
+ hashContext = t[len(t)-1]
+ case map[string]interface{}:
+ hashContext = t
+ default:
+ p.panicf("Key '%s' was already created as a hash.", keyContext)
+ }
+ }
+
+ p.context = keyContext
+ if array {
+ // If this is the first element for this array, then allocate a new
+ // list of tables for it.
+ k := key[len(key)-1]
+ if _, ok := hashContext[k]; !ok {
+ hashContext[k] = make([]map[string]interface{}, 0, 5)
+ }
+
+ // Add a new table. But make sure the key hasn't already been used
+ // for something else.
+ if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+ hashContext[k] = append(hash, make(map[string]interface{}))
+ } else {
+ p.panicf("Key '%s' was already created and cannot be used as "+
+ "an array.", keyContext)
+ }
+ } else {
+ p.setValue(key[len(key)-1], make(map[string]interface{}))
+ }
+ p.context = append(p.context, key[len(key)-1])
+}
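+
+// Illustrative note (not from the upstream library): for input such as
+//
+//    [a.b.c]
+//    answer = 42
+//
+// establishContext implicitly creates the tables "a" and "a.b" before setting
+// the context to "a.b.c". A later explicit [a.b] is accepted once, because
+// setValue clears the implicit marker, while repeating [a.b.c] is rejected as
+// a duplicate key.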
+
+// setValue sets the given key to the given value in the current context.
+// It makes sure that the key hasn't already been defined, accounting for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+ var tmpHash interface{}
+ var ok bool
+
+ hash := p.mapping
+ keyContext := make(Key, 0)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ if tmpHash, ok = hash[k]; !ok {
+ p.bug("Context for key '%s' has not been established.", keyContext)
+ }
+ switch t := tmpHash.(type) {
+ case []map[string]interface{}:
+ // The context is a table of hashes. Pick the most recent table
+ // defined as the current hash.
+ hash = t[len(t)-1]
+ case map[string]interface{}:
+ hash = t
+ default:
+ p.bug("Expected hash to have type 'map[string]interface{}', but "+
+ "it has '%T' instead.", tmpHash)
+ }
+ }
+ keyContext = append(keyContext, key)
+
+ if _, ok := hash[key]; ok {
+ // Typically, if the given key has already been set, then we have
+ // to raise an error since duplicate keys are disallowed. However,
+ // it's possible that a key was previously defined implicitly. In this
+ // case, it is allowed to be redefined concretely. (See the
+ // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+ //
+ // But we have to make sure to stop marking it as an implicit. (So that
+ // another redefinition provokes an error.)
+ //
+ // Note that since it has already been defined (as a hash), we don't
+ // want to overwrite it. So our business is done.
+ if p.isImplicit(keyContext) {
+ p.removeImplicit(keyContext)
+ return
+ }
+
+ // Otherwise, we have a concrete key trying to override a previous
+ // key, which is *always* wrong.
+ p.panicf("Key '%s' has already been defined.", keyContext)
+ }
+ hash[key] = value
+}
+
+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
+func (p *parser) setType(key string, typ tomlType) {
+ keyContext := make(Key, 0, len(p.context)+1)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ }
+ if len(key) > 0 { // allow type setting for hashes
+ keyContext = append(keyContext, key)
+ }
+ p.types[keyContext.String()] = typ
+}
+
+// addImplicit sets the given Key as having been created implicitly.
+func (p *parser) addImplicit(key Key) {
+ p.implicits[key.String()] = true
+}
+
+// removeImplicit stops tagging the given key as having been implicitly
+// created.
+func (p *parser) removeImplicit(key Key) {
+ p.implicits[key.String()] = false
+}
+
+// isImplicit returns true if the key group pointed to by the key was created
+// implicitly.
+func (p *parser) isImplicit(key Key) bool {
+ return p.implicits[key.String()]
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+ if len(p.currentKey) == 0 {
+ return p.context.String()
+ }
+ if len(p.context) == 0 {
+ return p.currentKey
+ }
+ return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+ if len(s) == 0 || s[0] != '\n' {
+ return s
+ }
+ return s[1:]
+}
+
+func stripEscapedWhitespace(s string) string {
+ esc := strings.Split(s, "\\\n")
+ if len(esc) > 1 {
+ for i := 1; i < len(esc); i++ {
+ esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+ }
+ }
+ return strings.Join(esc, "")
+}
+
+func (p *parser) replaceEscapes(str string) string {
+ var replaced []rune
+ s := []byte(str)
+ r := 0
+ for r < len(s) {
+ if s[r] != '\\' {
+ c, size := utf8.DecodeRune(s[r:])
+ r += size
+ replaced = append(replaced, c)
+ continue
+ }
+ r += 1
+ if r >= len(s) {
+ p.bug("Escape sequence at end of string.")
+ return ""
+ }
+ switch s[r] {
+ default:
+ p.bug("Expected valid escape code after \\, but got %q.", s[r])
+ return ""
+ case 'b':
+ replaced = append(replaced, rune(0x0008))
+ r += 1
+ case 't':
+ replaced = append(replaced, rune(0x0009))
+ r += 1
+ case 'n':
+ replaced = append(replaced, rune(0x000A))
+ r += 1
+ case 'f':
+ replaced = append(replaced, rune(0x000C))
+ r += 1
+ case 'r':
+ replaced = append(replaced, rune(0x000D))
+ r += 1
+ case '"':
+ replaced = append(replaced, rune(0x0022))
+ r += 1
+ case '\\':
+ replaced = append(replaced, rune(0x005C))
+ r += 1
+ case 'u':
+ // At this point, we know we have a Unicode escape of the form
+ // `uXXXX` at [r, r+5). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+ replaced = append(replaced, escaped)
+ r += 5
+ case 'U':
+ // At this point, we know we have a Unicode escape of the form
+ // `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+ replaced = append(replaced, escaped)
+ r += 9
+ }
+ }
+ return string(replaced)
+}
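+
+// Illustrative example (not from the upstream library): replaceEscapes
+// operates on the raw contents of a basic string, e.g.
+//
+//    replaceEscapes(`tab:\t quote:\" e-acute:\u00E9`)
+//
+// yields "tab:\t quote:\" e-acute:é" with the escapes resolved.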
+
+func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+ s := string(bs)
+ hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+ if err != nil {
+ p.bug("Could not parse '%s' as a hexadecimal number, but the "+
+ "lexer claims it's OK: %s", s, err)
+ }
+ if !utf8.ValidRune(rune(hex)) {
+ p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+ }
+ return rune(hex)
+}
+
+func isStringType(ty itemType) bool {
+ return ty == itemString || ty == itemMultilineString ||
+ ty == itemRawString || ty == itemRawMultilineString
+}
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
new file mode 100644
index 0000000..562164b
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/session.vim
@@ -0,0 +1 @@
+au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
new file mode 100644
index 0000000..c73f8af
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_check.go
@@ -0,0 +1,91 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, we seem to be moving
+// toward adding real composite types.
+type tomlType interface {
+ typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+ if t1 == nil || t2 == nil {
+ return false
+ }
+ return t1.typeString() == t2.typeString()
+}
+
+func typeIsHash(t tomlType) bool {
+ return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string {
+ return string(btype)
+}
+
+func (btype tomlBaseType) String() string {
+ return btype.typeString()
+}
+
+var (
+ tomlInteger tomlBaseType = "Integer"
+ tomlFloat tomlBaseType = "Float"
+ tomlDatetime tomlBaseType = "Datetime"
+ tomlString tomlBaseType = "String"
+ tomlBool tomlBaseType = "Bool"
+ tomlArray tomlBaseType = "Array"
+ tomlHash tomlBaseType = "Hash"
+ tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+ switch lexItem.typ {
+ case itemInteger:
+ return tomlInteger
+ case itemFloat:
+ return tomlFloat
+ case itemDatetime:
+ return tomlDatetime
+ case itemString:
+ return tomlString
+ case itemMultilineString:
+ return tomlString
+ case itemRawString:
+ return tomlString
+ case itemRawMultilineString:
+ return tomlString
+ case itemBool:
+ return tomlBool
+ }
+ p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+ panic("unreachable")
+}
+
+// typeOfArray returns a tomlType for an array given a list of types of its
+// values.
+//
+// In the current spec, if an array is homogeneous, then its type is always
+// "Array". If the array is not homogeneous, an error is generated.
+func (p *parser) typeOfArray(types []tomlType) tomlType {
+ // Empty arrays are cool.
+ if len(types) == 0 {
+ return tomlArray
+ }
+
+ theType := types[0]
+ for _, t := range types[1:] {
+ if !typeEqual(theType, t) {
+ p.panicf("Array contains values of type '%s' and '%s', but "+
+ "arrays must be homogeneous.", theType, t)
+ }
+ }
+ return tomlArray
+}
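+
+// Illustrative note (not from the upstream library): under this version of
+// the spec, [1, 2, 3] type-checks as an Array, while a mixed array such as
+// [1, "one"] makes typeOfArray raise a "must be homogeneous" parse error.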
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
new file mode 100644
index 0000000..608997c
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -0,0 +1,242 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+ "reflect"
+ "sort"
+ "sync"
+)
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string // the name of the field (`toml` tag included)
+ tag bool // whether field has a `toml` tag
+ index []int // represents the depth of an anonymous field
+ typ reflect.Type // the type of the field
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from toml tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that TOML should recognize for the given
+// type. The algorithm is breadth-first search over the set of structs to
+// include - the top struct and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+ opts := getOptions(sf.Tag)
+ if opts.skip {
+ continue
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := opts.name != ""
+ name := opts.name
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, field{name, tagged, index, ft})
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ f := field{name: ft.Name(), index: index, typ: ft}
+ next = append(next, f)
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with TOML tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// TOML tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
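+
+// Illustrative note (not from the upstream library): the rules above mirror
+// Go's own field shadowing. Given
+//
+//    type Inner struct{ Name string }
+//    type Outer struct {
+//        Inner
+//        Name string
+//    }
+//
+// typeFields(reflect.TypeOf(Outer{})) keeps Outer.Name: it has the shorter
+// index path, so dominantField drops the embedded Inner.Name.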
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml
new file mode 100644
index 0000000..4025e01
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/.travis.yml
@@ -0,0 +1,18 @@
+language: go
+
+go:
+ - 1.6
+ - 1.7
+ - 1.8
+ - tip
+
+script:
+ - go test -v
+
+notifications:
+ webhooks:
+ urls:
+ - https://webhooks.gitter.im/e/06e3328629952dabe3e0
+ on_success: change # options: [always|never|change] default: always
+ on_failure: always # options: [always|never|change] default: always
+ on_start: never # options: [always|never|change] default: always
diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md
new file mode 100644
index 0000000..d700ec4
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/CHANGELOG.md
@@ -0,0 +1,8 @@
+# 1.0.1 (2017-05-31)
+
+## Fixed
+- #21: Fix generation of alphanumeric strings (thanks @dbarranco)
+
+# 1.0.0 (2014-04-30)
+
+- Initial release.
diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md
new file mode 100644
index 0000000..163ffe7
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/README.md
@@ -0,0 +1,70 @@
+GoUtils
+===========
+[Stability: Maintenance](https://masterminds.github.io/stability/maintenance.html)
+[GoDoc](https://godoc.org/github.com/Masterminds/goutils) [Travis CI](https://travis-ci.org/Masterminds/goutils) [AppVeyor](https://ci.appveyor.com/project/mattfarina/goutils)
+
+
+GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some
+string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes:
+* WordUtils
+* RandomStringUtils
+* StringUtils (partial implementation)
+
+## Installation
+If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this:
+
+ go get github.com/Masterminds/goutils
+
+If you do not have Go set up on your system, please follow the [Go installation directions from the documentation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils.
+
+
+## Documentation
+GoUtils doc is available here: [GoDoc](https://godoc.org/github.com/Masterminds/goutils)
+
+
+## Usage
+The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file).
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/Masterminds/goutils"
+ )
+
+ func main() {
+
+ // EXAMPLE 1: A goutils function which returns no errors
+ fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF"
+
+ }
+Some functions return errors, mainly due to illegal arguments being used as parameters. The code example below illustrates how to handle a function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file).
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/Masterminds/goutils"
+ )
+
+ func main() {
+
+ // EXAMPLE 2: A goutils function which returns an error
+ rand1, err1 := goutils.Random (-1, 0, 0, true, true)
+
+ if err1 != nil {
+ fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...)
+ } else {
+ fmt.Println(rand1)
+ }
+
+ }
+
+## License
+GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license.
+
+## Issue Reporting
+Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues
+
+## Website
+* [GoUtils webpage](http://Masterminds.github.io/goutils/)
diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml
new file mode 100644
index 0000000..657564a
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/appveyor.yml
@@ -0,0 +1,21 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\Masterminds\goutils
+shallow_clone: true
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+build: off
+
+install:
+ - go version
+ - go env
+
+test_script:
+ - go test -v
+
+deploy: off
diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
new file mode 100644
index 0000000..177dd86
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
@@ -0,0 +1,251 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package goutils
+
+import (
+ "crypto/rand"
+ "fmt"
+ "math"
+ "math/big"
+ "regexp"
+ "unicode"
+)
+
+/*
+CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)).
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomNonAlphaNumeric(count int) (string, error) {
+ return CryptoRandomAlphaNumericCustom(count, false, false)
+}
+
+/*
+CryptoRandomAscii creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive).
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAscii(count int) (string, error) {
+ return CryptoRandom(count, 32, 127, false, false)
+}
+
+/*
+CryptoRandomNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of numeric characters.
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomNumeric(count int) (string, error) {
+ return CryptoRandom(count, 0, 0, false, true)
+}
+
+/*
+CryptoRandomAlphabetic creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alphabetic characters.
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphabetic(count int) (string, error) {
+ return CryptoRandom(count, 0, 0, true, false)
+}
+
+/*
+CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters.
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphaNumeric(count int) (string, error) {
+ if count == 0 {
+ return "", nil
+ }
+ RandomString, err := CryptoRandom(count, 0, 0, true, true)
+ if err != nil {
+ return "", fmt.Errorf("Error: %s", err)
+ }
+ match, err := regexp.MatchString("([0-9]+)", RandomString)
+ if err != nil {
+ panic(err)
+ }
+
+ if !match {
+ //Get the position between 0 and the length of the string-1 to insert a random number
+ position := getCryptoRandomInt(count)
+ //Insert a random number between [0-9] in the position
+ RandomString = RandomString[:position] + string('0' + getCryptoRandomInt(10)) + RandomString[position + 1:]
+ return RandomString, err
+ }
+ return RandomString, err
+
+}
+
+/*
+CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
+
+Parameters:
+ count - the length of random string to create
+ letters - if true, generated string may include alphabetic characters
+ numbers - if true, generated string may include numeric characters
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) {
+ return CryptoRandom(count, 0, 0, letters, numbers)
+}
+
+/*
+CryptoRandom creates a random string based on a variety of options, using Go's crypto/rand as the source of randomness.
+If the parameters start and end are both 0, they default to ' ' and 'z' (the ASCII printable characters),
+unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively.
+If chars is not nil, characters stored in chars that are between start and end are chosen.
+
+Parameters:
+ count - the length of random string to create
+ start - the position in set of chars (ASCII/Unicode int) to start at
+ end - the position in set of chars (ASCII/Unicode int) to end before
+ letters - if true, generated string may include alphabetic characters
+ numbers - if true, generated string may include numeric characters
+ chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
+
+Returns:
+ string - the random string
+ error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars)
+*/
+func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) {
+ if count == 0 {
+ return "", nil
+ } else if count < 0 {
+ err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...")
+ return "", err
+ }
+ if chars != nil && len(chars) == 0 {
+ err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty")
+ return "", err
+ }
+
+ if start == 0 && end == 0 {
+ if chars != nil {
+ end = len(chars)
+ } else {
+ if !letters && !numbers {
+ end = math.MaxInt32
+ } else {
+ end = 'z' + 1
+ start = ' '
+ }
+ }
+ } else {
+ if end <= start {
+ err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start)
+ return "", err
+ }
+
+ if chars != nil && end > len(chars) {
+ err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars))
+ return "", err
+ }
+ }
+
+ buffer := make([]rune, count)
+ gap := end - start
+
+ // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319
+ // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343
+
+ for count != 0 {
+ count--
+ var ch rune
+ if chars == nil {
+ ch = rune(getCryptoRandomInt(gap) + int64(start))
+ } else {
+ ch = chars[getCryptoRandomInt(gap) + int64(start)]
+ }
+
+ if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers {
+ if ch >= 56320 && ch <= 57343 { // low surrogate range
+ if count == 0 {
+ count++
+ } else {
+ // Insert low surrogate
+ buffer[count] = ch
+ count--
+ // Insert high surrogate
+ buffer[count] = rune(55296 + getCryptoRandomInt(128))
+ }
+ } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial)
+ if count == 0 {
+ count++
+ } else {
+ // Insert low surrogate
+ buffer[count] = rune(56320 + getCryptoRandomInt(128))
+ count--
+ // Insert high surrogate
+ buffer[count] = ch
+ }
+ } else if ch >= 56192 && ch <= 56319 {
+ // private high surrogate, skip it
+ count++
+ } else {
+ // not one of the surrogates*
+ buffer[count] = ch
+ }
+ } else {
+ count++
+ }
+ }
+ return string(buffer), nil
+}
+
+func getCryptoRandomInt(count int) int64 {
+ nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count)))
+ if err != nil {
+ panic(err)
+ }
+ return nBig.Int64()
+}
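+
+// Usage sketch (not from the upstream library):
+//
+//    token, err := goutils.CryptoRandomAlphaNumeric(16)
+//    if err != nil {
+//        // handle the error
+//    }
+//    // token is a 16-character alphanumeric string guaranteed to contain at
+//    // least one digit.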
diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go
new file mode 100644
index 0000000..1364e0c
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/randomstringutils.go
@@ -0,0 +1,268 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package goutils
+
+import (
+ "fmt"
+ "math"
+ "math/rand"
+ "regexp"
+ "time"
+ "unicode"
+)
+
+// RANDOM is a *rand.Rand seeded with the current time; it is the default source of randomness for the Random* functions
+var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+/*
+RandomNonAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)).
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomNonAlphaNumeric(count int) (string, error) {
+ return RandomAlphaNumericCustom(count, false, false)
+}
+
+/*
+RandomAscii creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive).
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAscii(count int) (string, error) {
+ return Random(count, 32, 127, false, false)
+}
+
+/*
+RandomNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of numeric characters.
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomNumeric(count int) (string, error) {
+ return Random(count, 0, 0, false, true)
+}
+
+/*
+RandomAlphabetic creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alphabetic characters.
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAlphabetic(count int) (string, error) {
+ return Random(count, 0, 0, true, false)
+}
+
+/*
+RandomAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters.
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAlphaNumeric(count int) (string, error) {
+ RandomString, err := Random(count, 0, 0, true, true)
+ if err != nil {
+ return "", fmt.Errorf("Error: %s", err)
+ }
+ match, err := regexp.MatchString("([0-9]+)", RandomString)
+ if err != nil {
+ panic(err)
+ }
+
+ if !match {
+ //Get the position between 0 and the length of the string-1 to insert a random number
+ position := rand.Intn(count)
+ //Insert a random number between [0-9] in the position
+ RandomString = RandomString[:position] + string('0'+rand.Intn(10)) + RandomString[position+1:]
+ return RandomString, err
+ }
+ return RandomString, err
+
+}
+
+/*
+RandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
+
+Parameters:
+ count - the length of random string to create
+ letters - if true, generated string may include alphabetic characters
+ numbers - if true, generated string may include numeric characters
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) {
+ return Random(count, 0, 0, letters, numbers)
+}
+
+/*
+Random creates a random string based on a variety of options, using default source of randomness.
+This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but
+instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance.
+
+Parameters:
+ count - the length of random string to create
+ start - the position in set of chars (ASCII/Unicode int) to start at
+ end - the position in set of chars (ASCII/Unicode int) to end before
+ letters - if true, generated string may include alphabetic characters
+ numbers - if true, generated string may include numeric characters
+ chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) {
+ return RandomSeed(count, start, end, letters, numbers, chars, RANDOM)
+}
+
+/*
+RandomSeed creates a random string based on a variety of options, using a supplied source of randomness.
+If the parameters start and end are both 0, they default to ' ' and 'z' (the ASCII printable characters),
+unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively.
+If chars is not nil, characters stored in chars that are between start and end are chosen.
+This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance
+with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably.
+
+Parameters:
+ count - the length of random string to create
+ start - the position in set of chars (ASCII/Unicode decimals) to start at
+ end - the position in set of chars (ASCII/Unicode decimals) to end before
+ letters - if true, generated string may include alphabetic characters
+ numbers - if true, generated string may include numeric characters
+ chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
+ random - a source of randomness.
+
+Returns:
+ string - the random string
+ error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars)
+*/
+func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) {
+
+ if count == 0 {
+ return "", nil
+ } else if count < 0 {
+ err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...")
+ return "", err
+ }
+ if chars != nil && len(chars) == 0 {
+ err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty")
+ return "", err
+ }
+
+ if start == 0 && end == 0 {
+ if chars != nil {
+ end = len(chars)
+ } else {
+ if !letters && !numbers {
+ end = math.MaxInt32
+ } else {
+ end = 'z' + 1
+ start = ' '
+ }
+ }
+ } else {
+ if end <= start {
+ err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start)
+ return "", err
+ }
+
+ if chars != nil && end > len(chars) {
+ err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars))
+ return "", err
+ }
+ }
+
+ buffer := make([]rune, count)
+ gap := end - start
+
+ // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319
+ // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343
+
+ for count != 0 {
+ count--
+ var ch rune
+ if chars == nil {
+ ch = rune(random.Intn(gap) + start)
+ } else {
+ ch = chars[random.Intn(gap)+start]
+ }
+
+ if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers {
+ if ch >= 56320 && ch <= 57343 { // low surrogate range
+ if count == 0 {
+ count++
+ } else {
+ // Insert low surrogate
+ buffer[count] = ch
+ count--
+ // Insert high surrogate
+ buffer[count] = rune(55296 + random.Intn(128))
+ }
+ } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial)
+ if count == 0 {
+ count++
+ } else {
+ // Insert low surrogate
+ buffer[count] = rune(56320 + random.Intn(128))
+ count--
+ // Insert high surrogate
+ buffer[count] = ch
+ }
+ } else if ch >= 56192 && ch <= 56319 {
+ // private high surrogate, skip it
+ count++
+ } else {
+ // not one of the surrogates*
+ buffer[count] = ch
+ }
+ } else {
+ count++
+ }
+ }
+ return string(buffer), nil
+}
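+
+// Illustrative sketch (not from the upstream library): as described above, a
+// fixed seed makes RandomSeed reproducible. With math/rand imported by the
+// caller:
+//
+//    s1, _ := goutils.RandomSeed(8, 0, 0, true, true, nil, rand.New(rand.NewSource(1)))
+//    s2, _ := goutils.RandomSeed(8, 0, 0, true, true, nil, rand.New(rand.NewSource(1)))
+//    // s1 == s2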
diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go
new file mode 100644
index 0000000..5037c45
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/stringutils.go
@@ -0,0 +1,224 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package goutils
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "unicode"
+)
+
+// Typically returned by functions where a searched item cannot be found
+const INDEX_NOT_FOUND = -1
+
+/*
+Abbreviate abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "Now is the time for..."
+
+Specifically, the algorithm is as follows:
+
+ - If str is less than maxWidth characters long, return it.
+ - Else abbreviate it to (str[0:maxWidth - 3] + "...").
+ - If maxWidth is less than 4, return an illegal argument error.
+ - In no case will it return a string of length greater than maxWidth.
+
+Parameters:
+ str - the string to check
+ maxWidth - maximum length of result string, must be at least 4
+
+Returns:
+ string - abbreviated string
+ error - if the width is too small
+*/
+func Abbreviate(str string, maxWidth int) (string, error) {
+ return AbbreviateFull(str, 0, maxWidth)
+}
+
+/*
+AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..."
+This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not
+necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear
+somewhere in the result.
+In no case will it return a string of length greater than maxWidth.
+
+Parameters:
+ str - the string to check
+ offset - left edge of source string
+ maxWidth - maximum length of result string, must be at least 4
+
+Returns:
+ string - abbreviated string
+ error - if the width is too small
+*/
+func AbbreviateFull(str string, offset int, maxWidth int) (string, error) {
+ if str == "" {
+ return "", nil
+ }
+ if maxWidth < 4 {
+ err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4")
+ return "", err
+ }
+ if len(str) <= maxWidth {
+ return str, nil
+ }
+ if offset > len(str) {
+ offset = len(str)
+ }
+ if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7
+ offset = len(str) - (maxWidth - 3)
+ }
+ abrevMarker := "..."
+ if offset <= 4 {
+ return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker;
+ }
+ if maxWidth < 7 {
+ err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7")
+ return "", err
+ }
+ if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15
+ abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3))
+ return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3);
+ }
+ return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3));
+}
+
+/*
+DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune).
+It returns the string without whitespaces.
+
+Parameter:
+ str - the string to delete whitespace from, may be empty
+
+Returns:
+ the string without whitespaces
+*/
+func DeleteWhiteSpace(str string) string {
+ if str == "" {
+ return str
+ }
+ sz := len(str)
+ var chs bytes.Buffer
+ count := 0
+ for i := 0; i < sz; i++ {
+ ch := rune(str[i])
+ if !unicode.IsSpace(ch) {
+ chs.WriteRune(ch)
+ count++
+ }
+ }
+ if count == sz {
+ return str
+ }
+ return chs.String()
+}
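+
+// A minimal usage sketch (assuming this package is imported as goutils):
+//
+//	s := goutils.DeleteWhiteSpace("  a b\tc ")
+//	// s == "abc"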
+
+/*
+IndexOfDifference compares two strings, and returns the index at which the strings begin to differ.
+
+Parameters:
+ str1 - the first string
+ str2 - the second string
+
+Returns:
+ the index where str1 and str2 begin to differ; -1 if they are equal
+*/
+func IndexOfDifference(str1 string, str2 string) int {
+ if str1 == str2 {
+ return INDEX_NOT_FOUND
+ }
+ if IsEmpty(str1) || IsEmpty(str2) {
+ return 0
+ }
+ var i int
+ for i = 0; i < len(str1) && i < len(str2); i++ {
+ if rune(str1[i]) != rune(str2[i]) {
+ break
+ }
+ }
+ if i < len(str2) || i < len(str1) {
+ return i
+ }
+ return INDEX_NOT_FOUND
+}
+
+/*
+IsBlank checks if a string is whitespace or empty (""). Observe the following behavior:
+
+ goutils.IsBlank("") = true
+ goutils.IsBlank(" ") = true
+ goutils.IsBlank("bob") = false
+ goutils.IsBlank(" bob ") = false
+
+Parameter:
+ str - the string to check
+
+Returns:
+ true - if the string is whitespace or empty ("")
+*/
+func IsBlank(str string) bool {
+ strLen := len(str)
+ if str == "" || strLen == 0 {
+ return true
+ }
+ for i := 0; i < strLen; i++ {
+ if !unicode.IsSpace(rune(str[i])) {
+ return false
+ }
+ }
+ return true
+}
+
+/*
+IndexOf returns the index of the first instance of sub in str, with the search beginning from the
+index start point specified. -1 is returned if sub is not present in str.
+
+An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero.
+A start position greater than the string length returns -1.
+
+Parameters:
+ str - the string to check
+ sub - the substring to find
+ start - the start position; negative treated as zero
+
+Returns:
+ the first index where the sub string was found (always >= start)
+*/
+func IndexOf(str string, sub string, start int) int {
+
+ if start < 0 {
+ start = 0
+ }
+
+ if len(str) < start {
+ return INDEX_NOT_FOUND
+ }
+
+ if IsEmpty(str) || IsEmpty(sub) {
+ return INDEX_NOT_FOUND
+ }
+
+ partialIndex := strings.Index(str[start:len(str)], sub)
+ if partialIndex == -1 {
+ return INDEX_NOT_FOUND
+ }
+ return partialIndex + start
+}
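+
+// A minimal usage sketch (assuming this package is imported as goutils; the
+// strings are illustrative):
+//
+//	i := goutils.IndexOf("go gopher", "go", 1)
+//	// i == 3: the search starts at index 1, skipping the match at index 0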
+
+// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise.
+func IsEmpty(str string) bool {
+ return len(str) == 0
+}
diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go
new file mode 100644
index 0000000..034cad8
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/wordutils.go
@@ -0,0 +1,357 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package goutils provides utility functions to manipulate strings in various ways.
+The code snippets below show examples of how to use goutils. Some functions return
+errors while others do not, so usage would vary as a result.
+
+Example:
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/aokoli/goutils"
+ )
+
+ func main() {
+
+ // EXAMPLE 1: A goutils function which returns no errors
+ fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF"
+
+
+
+ // EXAMPLE 2: A goutils function which returns an error
+ rand1, err1 := goutils.Random (-1, 0, 0, true, true)
+
+ if err1 != nil {
+ fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...)
+ } else {
+ fmt.Println(rand1)
+ }
+ }
+*/
+package goutils
+
+import (
+ "bytes"
+ "strings"
+ "unicode"
+)
+
+// VERSION indicates the current version of goutils
+const VERSION = "1.0.0"
+
+/*
+Wrap wraps a single line of text, identifying words by ' '.
+New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped.
+Leading spaces on a new line are stripped. Trailing spaces are not stripped.
+
+Parameters:
+ str - the string to be word wrapped
+ wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1
+
+Returns:
+ a line with newlines inserted
+*/
+func Wrap(str string, wrapLength int) string {
+ return WrapCustom(str, wrapLength, "", false)
+}
+
+/*
+WrapCustom wraps a single line of text, identifying words by ' '.
+Leading spaces on a new line are stripped. Trailing spaces are not stripped.
+
+Parameters:
+ str - the string to be word wrapped
+ wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1
+ newLineStr - the string to insert for a new line, "" uses '\n'
+ wrapLongWords - true if long words (such as URLs) should be wrapped
+
+Returns:
+ a line with newlines inserted
+*/
+func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string {
+
+ if str == "" {
+ return ""
+ }
+ if newLineStr == "" {
+ newLineStr = "\n" // TODO Assumes "\n" is separator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons
+ }
+ if wrapLength < 1 {
+ wrapLength = 1
+ }
+
+ inputLineLength := len(str)
+ offset := 0
+
+ var wrappedLine bytes.Buffer
+
+ for inputLineLength-offset > wrapLength {
+
+ if rune(str[offset]) == ' ' {
+ offset++
+ continue
+ }
+
+ end := wrapLength + offset + 1
+ spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset
+
+ if spaceToWrapAt >= offset {
+ // normal word (not longer than wrapLength)
+ wrappedLine.WriteString(str[offset:spaceToWrapAt])
+ wrappedLine.WriteString(newLineStr)
+ offset = spaceToWrapAt + 1
+
+ } else {
+ // long word or URL
+ if wrapLongWords {
+ end := wrapLength + offset
+ // long words are wrapped one line at a time
+ wrappedLine.WriteString(str[offset:end])
+ wrappedLine.WriteString(newLineStr)
+ offset += wrapLength
+ } else {
+ // long words aren't wrapped, just extended beyond limit
+ end := wrapLength + offset
+ index := strings.IndexRune(str[end:len(str)], ' ')
+ if index == -1 {
+ wrappedLine.WriteString(str[offset:len(str)])
+ offset = inputLineLength
+ } else {
+ spaceToWrapAt = index + end
+ wrappedLine.WriteString(str[offset:spaceToWrapAt])
+ wrappedLine.WriteString(newLineStr)
+ offset = spaceToWrapAt + 1
+ }
+ }
+ }
+ }
+
+ wrappedLine.WriteString(str[offset:len(str)])
+
+ return wrappedLine.String()
+
+}
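+
+// A minimal usage sketch for Wrap (assuming this package is imported as goutils):
+//
+//	wrapped := goutils.Wrap("The quick brown fox", 10)
+//	// wrapped == "The quick\nbrown fox"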
+
+/*
+Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed.
+To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune).
+The delimiters represent a set of characters understood to separate words. The first string character
+and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "".
+Capitalization uses the Unicode title case, normally equivalent to upper case.
+
+Parameters:
+ str - the string to capitalize
+ delimiters - set of characters to determine capitalization, omitting this parameter means whitespace is used as the delimiter
+
+Returns:
+ capitalized string
+*/
+func Capitalize(str string, delimiters ...rune) string {
+
+ var delimLen int
+
+ if delimiters == nil {
+ delimLen = -1
+ } else {
+ delimLen = len(delimiters)
+ }
+
+ if str == "" || delimLen == 0 {
+ return str
+ }
+
+ buffer := []rune(str)
+ capitalizeNext := true
+ for i := 0; i < len(buffer); i++ {
+ ch := buffer[i]
+ if isDelimiter(ch, delimiters...) {
+ capitalizeNext = true
+ } else if capitalizeNext {
+ buffer[i] = unicode.ToTitle(ch)
+ capitalizeNext = false
+ }
+ }
+ return string(buffer)
+
+}
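+
+// A minimal usage sketch (assuming this package is imported as goutils; the
+// inputs are illustrative):
+//
+//	c := goutils.Capitalize("hello world")      // "Hello World" (whitespace delimits)
+//	d := goutils.Capitalize("hello.world", '.') // "Hello.World" ('.' delimits)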
+
+/*
+CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a
+titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood
+to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized.
+Capitalization uses the Unicode title case, normally equivalent to upper case.
+
+Parameters:
+ str - the string to capitalize fully
+ delimiters - set of characters to determine capitalization, omitting this parameter means whitespace is used as the delimiter
+
+Returns:
+ capitalized string
+*/
+func CapitalizeFully(str string, delimiters ...rune) string {
+
+ var delimLen int
+
+ if delimiters == nil {
+ delimLen = -1
+ } else {
+ delimLen = len(delimiters)
+ }
+
+ if str == "" || delimLen == 0 {
+ return str
+ }
+ str = strings.ToLower(str)
+ return Capitalize(str, delimiters...)
+}
+
+/*
+Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed.
+The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter
+character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char).
+
+Parameters:
+ str - the string to uncapitalize fully
+ delimiters - set of characters to determine capitalization, omitting this parameter means whitespace is used as the delimiter
+
+Returns:
+ uncapitalized string
+*/
+func Uncapitalize(str string, delimiters ...rune) string {
+
+ var delimLen int
+
+ if delimiters == nil {
+ delimLen = -1
+ } else {
+ delimLen = len(delimiters)
+ }
+
+ if str == "" || delimLen == 0 {
+ return str
+ }
+
+ buffer := []rune(str)
+ uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char.
+ for i := 0; i < len(buffer); i++ {
+ ch := buffer[i]
+ if isDelimiter(ch, delimiters...) {
+ uncapitalizeNext = true
+ } else if uncapitalizeNext {
+ buffer[i] = unicode.ToLower(ch)
+ uncapitalizeNext = false
+ }
+ }
+ return string(buffer)
+}
+
+/*
+SwapCase swaps the case of a string using a word based algorithm.
+
+Conversion algorithm:
+
+ Upper case character converts to Lower case
+ Title case character converts to Lower case
+ Lower case character after Whitespace or at start converts to Title case
+ Other Lower case character converts to Upper case
+ Whitespace is defined by unicode.IsSpace(char).
+
+Parameters:
+ str - the string to swap case
+
+Returns:
+ the changed string
+*/
+func SwapCase(str string) string {
+ if str == "" {
+ return str
+ }
+ buffer := []rune(str)
+
+ whitespace := true
+
+ for i := 0; i < len(buffer); i++ {
+ ch := buffer[i]
+ if unicode.IsUpper(ch) {
+ buffer[i] = unicode.ToLower(ch)
+ whitespace = false
+ } else if unicode.IsTitle(ch) {
+ buffer[i] = unicode.ToLower(ch)
+ whitespace = false
+ } else if unicode.IsLower(ch) {
+ if whitespace {
+ buffer[i] = unicode.ToTitle(ch)
+ whitespace = false
+ } else {
+ buffer[i] = unicode.ToUpper(ch)
+ }
+ } else {
+ whitespace = unicode.IsSpace(ch)
+ }
+ }
+ return string(buffer)
+}
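+
+// A minimal usage sketch (assuming this package is imported as goutils):
+//
+//	s := goutils.SwapCase("Hello World")
+//	// s == "hELLO wORLD"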
+
+/*
+Initials extracts the initial letters from each word in the string. The first letter of the string and all first
+letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters
+parameter is excluded, then whitespace is used. Whitespace is defined by unicode.IsSpace(char). An empty delimiter array returns an empty string.
+
+Parameters:
+ str - the string to get initials from
+ delimiters - set of characters to determine words, omitting this parameter means whitespace is used as the delimiter
+Returns:
+ string of initial letters
+*/
+func Initials(str string, delimiters ...rune) string {
+ if str == "" {
+ return str
+ }
+ if delimiters != nil && len(delimiters) == 0 {
+ return ""
+ }
+ strLen := len(str)
+ var buf bytes.Buffer
+ lastWasGap := true
+ for i := 0; i < strLen; i++ {
+ ch := rune(str[i])
+
+ if isDelimiter(ch, delimiters...) {
+ lastWasGap = true
+ } else if lastWasGap {
+ buf.WriteRune(ch)
+ lastWasGap = false
+ }
+ }
+ return buf.String()
+}
+
+// private function (lower case func name)
+func isDelimiter(ch rune, delimiters ...rune) bool {
+ if delimiters == nil {
+ return unicode.IsSpace(ch)
+ }
+ for _, delimiter := range delimiters {
+ if ch == delimiter {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml
new file mode 100644
index 0000000..096369d
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/.travis.yml
@@ -0,0 +1,29 @@
+language: go
+
+go:
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - 1.12.x
+ - tip
+
+# Setting sudo access to false will let Travis CI use containers rather than
+# VMs to run the tests. For more details see:
+# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/
+# - http://docs.travis-ci.com/user/workers/standard-infrastructure/
+sudo: false
+
+script:
+ - make setup
+ - make test
+
+notifications:
+ webhooks:
+ urls:
+ - https://webhooks.gitter.im/e/06e3328629952dabe3e0
+ on_success: change # options: [always|never|change] default: always
+ on_failure: always # options: [always|never|change] default: always
+ on_start: never # options: [always|never|change] default: always
diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md
new file mode 100644
index 0000000..e405c9a
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md
@@ -0,0 +1,109 @@
+# 1.5.0 (2019-09-11)
+
+## Added
+
+- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
+
+## Changed
+
+- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
+- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
+- #72: Adding docs comment pointing to vert for a cli
+- #71: Update the docs on pre-release comparator handling
+- #89: Test with new go versions (thanks @thedevsaddam)
+- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
+
+## Fixed
+
+- #78: Fix unchecked error in example code (thanks @ravron)
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+- #97: Fixed copyright file for proper display on GitHub
+- #107: Fix handling prerelease when sorting alphanum and num
+- #109: Fixed where Validate sometimes returns wrong message on error
+
+# 1.4.2 (2018-04-10)
+
+## Changed
+- #72: Updated the docs to point to vert for a console application
+- #71: Update the docs on pre-release comparator handling
+
+## Fixed
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+
+# 1.4.1 (2018-04-02)
+
+## Fixed
+- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
+
+# 1.4.0 (2017-10-04)
+
+## Changed
+- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
+
+# 1.3.1 (2017-07-10)
+
+## Fixed
+- Fixed #57: number comparisons in prerelease sometimes inaccurate
+
+# 1.3.0 (2017-05-02)
+
+## Added
+- #45: Added json (un)marshaling support (thanks @mh-cbon)
+- Stability marker. See https://masterminds.github.io/stability/
+
+## Fixed
+- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
+
+## Changed
+- #55: The godoc icon moved from png to svg
+
+# 1.2.3 (2017-04-03)
+
+## Fixed
+- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
+
+# Release 1.2.2 (2016-12-13)
+
+## Fixed
+- #34: Fixed issue where hyphen range was not working with pre-release parsing.
+
+# Release 1.2.1 (2016-11-28)
+
+## Fixed
+- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
+ properly.
+
+# Release 1.2.0 (2016-11-04)
+
+## Added
+- #20: Added MustParse function for versions (thanks @adamreese)
+- #15: Added increment methods on versions (thanks @mh-cbon)
+
+## Fixed
+- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
+ might not satisfy the intended compatibility. The change here ignores pre-releases
+ on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
+ constraint. For example, `^1.2.3` will ignore pre-releases while
+ `^1.2.3-alpha` will include them.
+
+# Release 1.1.1 (2016-06-30)
+
+## Changed
+- Issue #9: Speed up version comparison performance (thanks @sdboyer)
+- Issue #8: Added benchmarks (thanks @sdboyer)
+- Updated Go Report Card URL to new location
+- Updated Readme to add code snippet formatting (thanks @mh-cbon)
+- Updating tagging to v[SemVer] structure for compatibility with other tools.
+
+# Release 1.1.0 (2016-03-11)
+
+- Issue #2: Implemented validation to provide reasons a versions failed a
+ constraint.
+
+# Release 1.0.1 (2015-12-31)
+
+- Fixed #1: * constraint failing on valid versions.
+
+# Release 1.0.0 (2015-10-20)
+
+- Initial release
diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt
new file mode 100644
index 0000000..9ff7da9
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2014-2019, Matt Butcher and Matt Farina
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile
new file mode 100644
index 0000000..a7a1b4e
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/Makefile
@@ -0,0 +1,36 @@
+.PHONY: setup
+setup:
+ go get -u gopkg.in/alecthomas/gometalinter.v1
+ gometalinter.v1 --install
+
+.PHONY: test
+test: validate lint
+ @echo "==> Running tests"
+ go test -v
+
+.PHONY: validate
+validate:
+ @echo "==> Running static validations"
+ @gometalinter.v1 \
+ --disable-all \
+ --enable deadcode \
+ --severity deadcode:error \
+ --enable gofmt \
+ --enable gosimple \
+ --enable ineffassign \
+ --enable misspell \
+ --enable vet \
+ --tests \
+ --vendor \
+ --deadline 60s \
+ ./... || exit_code=1
+
+.PHONY: lint
+lint:
+ @echo "==> Running linters"
+ @gometalinter.v1 \
+ --disable-all \
+ --enable golint \
+ --vendor \
+ --deadline 60s \
+ ./... || :
diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md
new file mode 100644
index 0000000..1b52d2f
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/README.md
@@ -0,0 +1,194 @@
+# SemVer
+
+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[](https://masterminds.github.io/stability/active.html)
+[](https://travis-ci.org/Masterminds/semver) [](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [](https://godoc.org/github.com/Masterminds/semver) [](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+If you are looking for a command line tool for version comparisons please see
+[vert](https://github.com/Masterminds/vert) which uses this library.
+
+## Parsing Semantic Versions
+
+To parse a semantic version use the `NewVersion` function. For example,
+
+```go
+ v, err := semver.NewVersion("1.2.3-beta.1+build345")
+```
+
+If there is an error the version wasn't parseable. The version object has methods
+to get the parts of the version, compare it to other versions, convert the
+version back into a string, and get the original string. For more details
+please see the [documentation](https://godoc.org/github.com/Masterminds/semver).
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/)
+package from the standard library. For example,
+
+```go
+ raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+ vs := make([]*semver.Version, len(raw))
+ for i, r := range raw {
+ v, err := semver.NewVersion(r)
+ if err != nil {
+ t.Errorf("Error parsing version: %s", err)
+ }
+
+ vs[i] = v
+ }
+
+ sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+ c, err := semver.NewConstraint(">= 1.2.3")
+ if err != nil {
+ // Handle constraint not being parseable.
+ }
+
+ v, _ := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parseable.
+ }
+ // Check if the version meets the constraints. The a variable will be true.
+ a := c.Check(v)
+```
+
+## Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma-separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. A short sketch of checking such an OR'd
+constraint follows the list of operators below.
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
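+
+For example, checking an OR'd constraint might look like this (a minimal
+sketch; the version numbers are illustrative):
+
+```go
+    c, err := semver.NewConstraint(">= 1.2, < 3.0.0 || >= 4.2.3")
+    if err != nil {
+        // Handle constraint not being parseable.
+    }
+
+    v, err := semver.NewVersion("4.2.5")
+    if err != nil {
+        // Handle version not being parseable.
+    }
+
+    // 4.2.5 fails the first range but satisfies ">= 4.2.3", so ok is true.
+    ok := c.Check(v)
+```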
+
+## Working With Pre-release Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, pre-releases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification pre-releases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer comparisons without a pre-release comparator will skip pre-release versions.
+For example, `>=1.2.3` will skip pre-releases when looking at a list of releases
+while `>=1.2.3-0` will evaluate and find pre-releases.
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the spec. The lowest character is a `0` in ASCII sort order (see an [ASCII Table](http://www.asciitable.com/))
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
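+
+A minimal sketch of the difference (the versions are illustrative):
+
+```go
+    v, _ := semver.NewVersion("1.2.4-beta.1")
+
+    c1, _ := semver.NewConstraint(">=1.2.3")
+    c1.Check(v) // false: the constraint has no pre-release comparator
+
+    c2, _ := semver.NewConstraint(">=1.2.3-0")
+    c2.Check(v) // true: pre-releases are evaluated
+```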
+
+## Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
+## Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the pack level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+## Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+## Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes. This is useful
+when comparing API versions, as a major change is API breaking. A short sketch
+combining tilde and caret checks follows this list. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^0.0.1` is equivalent to `>= 0.0.1, < 1.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
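+
+A minimal sketch combining tilde and caret checks (the versions are illustrative):
+
+```go
+    tilde, _ := semver.NewConstraint("~1.2.3")
+    tilde.Check(semver.MustParse("1.2.9")) // true:  within >= 1.2.3, < 1.3.0
+    tilde.Check(semver.MustParse("1.3.0")) // false: minor version changed
+
+    caret, _ := semver.NewConstraint("^1.2.3")
+    caret.Check(semver.MustParse("1.9.0")) // true:  within >= 1.2.3, < 2.0.0
+    caret.Check(semver.MustParse("2.0.0")) // false: major version changed
+```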
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+ c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+ if err != nil {
+ // Handle constraint not being parseable.
+ }
+
+ v, _ := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parseable.
+ }
+
+ // Validate a version against a constraint.
+ a, msgs := c.Validate(v)
+ // a is false
+ for _, m := range msgs {
+ fmt.Println(m)
+
+ // Loops over the errors which would read
+ // "1.3 is greater than 1.2.3"
+ // "1.3 is less than 1.4"
+ }
+```
+
+# Fuzzing
+
+ [dvyukov/go-fuzz](https://github.com/dvyukov/go-fuzz) is used for fuzzing.
+
+1. `go-fuzz-build`
+2. `go-fuzz -workdir=fuzz`
+
+# Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
diff --git a/vendor/github.com/Masterminds/semver/appveyor.yml b/vendor/github.com/Masterminds/semver/appveyor.yml
new file mode 100644
index 0000000..b2778df
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/appveyor.yml
@@ -0,0 +1,44 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\Masterminds\semver
+shallow_clone: true
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+install:
+ - go version
+ - go env
+ - go get -u gopkg.in/alecthomas/gometalinter.v1
+ - set PATH=%PATH%;%GOPATH%\bin
+ - gometalinter.v1.exe --install
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - "gometalinter.v1 \
+ --disable-all \
+ --enable deadcode \
+ --severity deadcode:error \
+ --enable gofmt \
+ --enable gosimple \
+ --enable ineffassign \
+ --enable misspell \
+ --enable vet \
+ --tests \
+ --vendor \
+ --deadline 60s \
+ ./... || exit_code=1"
+ - "gometalinter.v1 \
+ --disable-all \
+ --enable golint \
+ --vendor \
+ --deadline 60s \
+ ./... || :"
+ - go test -v
+
+deploy: off
diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go
new file mode 100644
index 0000000..a782358
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/collection.go
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection. The number of Version instances
+// on the slice.
+func (c Collection) Len() int {
+ return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects on the
+// slice. It checks if one is less than the other.
+func (c Collection) Less(i, j int) bool {
+ return c[i].LessThan(c[j])
+}
+
+// Swap is needed for the sort interface to replace the Version objects
+// at two different positions in the slice.
+func (c Collection) Swap(i, j int) {
+ c[i], c[j] = c[j], c[i]
+}
diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go
new file mode 100644
index 0000000..b94b934
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/constraints.go
@@ -0,0 +1,423 @@
+package semver
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// Constraints is one or more constraint that a semantic version can be
+// checked against.
+type Constraints struct {
+ constraints [][]*constraint
+}
+
+// NewConstraint returns a Constraints instance that a Version instance can
+// be checked against. If there is a parse error it will be returned.
+func NewConstraint(c string) (*Constraints, error) {
+
+ // Rewrite - ranges into a comparison operation.
+ c = rewriteRange(c)
+
+ ors := strings.Split(c, "||")
+ or := make([][]*constraint, len(ors))
+ for k, v := range ors {
+ cs := strings.Split(v, ",")
+ result := make([]*constraint, len(cs))
+ for i, s := range cs {
+ pc, err := parseConstraint(s)
+ if err != nil {
+ return nil, err
+ }
+
+ result[i] = pc
+ }
+ or[k] = result
+ }
+
+ o := &Constraints{constraints: or}
+ return o, nil
+}
+
+// Check tests if a version satisfies the constraints.
+func (cs Constraints) Check(v *Version) bool {
+ // loop over the ORs and check the inner ANDs
+ for _, o := range cs.constraints {
+ joy := true
+ for _, c := range o {
+ if !c.check(v) {
+ joy = false
+ break
+ }
+ }
+
+ if joy {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Validate checks if a version satisfies a constraint. If not a slice of
+// reasons for the failure are returned in addition to a bool.
+func (cs Constraints) Validate(v *Version) (bool, []error) {
+ // loop over the ORs and check the inner ANDs
+ var e []error
+
+ // Capture the prerelease message only once. When it happens the first time
+ // this var is marked
+ var prerelesase bool
+ for _, o := range cs.constraints {
+ joy := true
+ for _, c := range o {
+ // Before running the check, handle the case where the version is
+ // a prerelease and the check is not searching for prereleases.
+ if c.con.pre == "" && v.pre != "" {
+ if !prerelesase {
+ em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ e = append(e, em)
+ prerelesase = true
+ }
+ joy = false
+
+ } else {
+
+ if !c.check(v) {
+ em := fmt.Errorf(c.msg, v, c.orig)
+ e = append(e, em)
+ joy = false
+ }
+ }
+ }
+
+ if joy {
+ return true, []error{}
+ }
+ }
+
+ return false, e
+}
+
+var constraintOps map[string]cfunc
+var constraintMsg map[string]string
+var constraintRegex *regexp.Regexp
+
+func init() {
+ constraintOps = map[string]cfunc{
+ "": constraintTildeOrEqual,
+ "=": constraintTildeOrEqual,
+ "!=": constraintNotEqual,
+ ">": constraintGreaterThan,
+ "<": constraintLessThan,
+ ">=": constraintGreaterThanEqual,
+ "=>": constraintGreaterThanEqual,
+ "<=": constraintLessThanEqual,
+ "=<": constraintLessThanEqual,
+ "~": constraintTilde,
+ "~>": constraintTilde,
+ "^": constraintCaret,
+ }
+
+ constraintMsg = map[string]string{
+ "": "%s is not equal to %s",
+ "=": "%s is not equal to %s",
+ "!=": "%s is equal to %s",
+ ">": "%s is less than or equal to %s",
+ "<": "%s is greater than or equal to %s",
+ ">=": "%s is less than %s",
+ "=>": "%s is less than %s",
+ "<=": "%s is greater than %s",
+ "=<": "%s is greater than %s",
+ "~": "%s does not have same major and minor version as %s",
+ "~>": "%s does not have same major and minor version as %s",
+ "^": "%s does not have same major version as %s",
+ }
+
+ ops := make([]string, 0, len(constraintOps))
+ for k := range constraintOps {
+ ops = append(ops, regexp.QuoteMeta(k))
+ }
+
+ constraintRegex = regexp.MustCompile(fmt.Sprintf(
+ `^\s*(%s)\s*(%s)\s*$`,
+ strings.Join(ops, "|"),
+ cvRegex))
+
+ constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
+ `\s*(%s)\s+-\s+(%s)\s*`,
+ cvRegex, cvRegex))
+}
+
+// An individual constraint
+type constraint struct {
+ // The callback function for the constraint. It performs the logic for
+ // the constraint.
+ function cfunc
+
+ msg string
+
+ // The version used in the constraint check. For example, if a constraint
+ // is '<= 2.0.0', con holds a Version instance representing 2.0.0.
+ con *Version
+
+ // The original parsed version (e.g., 4.x from != 4.x)
+ orig string
+
+ // When an x is used as part of the version (e.g., 1.x)
+ minorDirty bool
+ dirty bool
+ patchDirty bool
+}
+
+// Check if a version meets the constraint
+func (c *constraint) check(v *Version) bool {
+ return c.function(v, c)
+}
+
+type cfunc func(v *Version, c *constraint) bool
+
+func parseConstraint(c string) (*constraint, error) {
+ m := constraintRegex.FindStringSubmatch(c)
+ if m == nil {
+ return nil, fmt.Errorf("improper constraint: %s", c)
+ }
+
+ ver := m[2]
+ orig := ver
+ minorDirty := false
+ patchDirty := false
+ dirty := false
+ if isX(m[3]) {
+ ver = "0.0.0"
+ dirty = true
+ } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
+ minorDirty = true
+ dirty = true
+ ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
+ } else if isX(strings.TrimPrefix(m[5], ".")) {
+ dirty = true
+ patchDirty = true
+ ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
+ }
+
+ con, err := NewVersion(ver)
+ if err != nil {
+
+ // The constraintRegex should catch any regex parsing errors. So,
+ // we should never get here.
+ return nil, errors.New("constraint Parser Error")
+ }
+
+ cs := &constraint{
+ function: constraintOps[m[1]],
+ msg: constraintMsg[m[1]],
+ con: con,
+ orig: orig,
+ minorDirty: minorDirty,
+ patchDirty: patchDirty,
+ dirty: dirty,
+ }
+ return cs, nil
+}
+
+// Constraint functions
+func constraintNotEqual(v *Version, c *constraint) bool {
+ if c.dirty {
+
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ if c.con.Major() != v.Major() {
+ return true
+ }
+ if c.con.Minor() != v.Minor() && !c.minorDirty {
+ return true
+ } else if c.minorDirty {
+ return false
+ }
+
+ return false
+ }
+
+ return !v.Equal(c.con)
+}
+
+func constraintGreaterThan(v *Version, c *constraint) bool {
+
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ return v.Compare(c.con) == 1
+}
+
+func constraintLessThan(v *Version, c *constraint) bool {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ if !c.dirty {
+ return v.Compare(c.con) < 0
+ }
+
+ if v.Major() > c.con.Major() {
+ return false
+ } else if v.Minor() > c.con.Minor() && !c.minorDirty {
+ return false
+ }
+
+ return true
+}
+
+func constraintGreaterThanEqual(v *Version, c *constraint) bool {
+
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ return v.Compare(c.con) >= 0
+}
+
+func constraintLessThanEqual(v *Version, c *constraint) bool {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ if !c.dirty {
+ return v.Compare(c.con) <= 0
+ }
+
+ if v.Major() > c.con.Major() {
+ return false
+ } else if v.Minor() > c.con.Minor() && !c.minorDirty {
+ return false
+ }
+
+ return true
+}
+
+// ~*, ~>* --> >= 0.0.0 (any)
+// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0
+// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
+// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
+// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
+// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
+func constraintTilde(v *Version, c *constraint) bool {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ if v.LessThan(c.con) {
+ return false
+ }
+
+ // ~0.0.0 is a special case where all constraints are accepted. It's
+ // equivalent to >= 0.0.0.
+ if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 &&
+ !c.minorDirty && !c.patchDirty {
+ return true
+ }
+
+ if v.Major() != c.con.Major() {
+ return false
+ }
+
+ if v.Minor() != c.con.Minor() && !c.minorDirty {
+ return false
+ }
+
+ return true
+}
+
+// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
+// it's a straight =
+func constraintTildeOrEqual(v *Version, c *constraint) bool {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ if c.dirty {
+ c.msg = constraintMsg["~"]
+ return constraintTilde(v, c)
+ }
+
+ return v.Equal(c.con)
+}
+
+// ^* --> (any)
+// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0
+// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0
+// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0
+// ^1.2.3 --> >=1.2.3, <2.0.0
+// ^1.2.0 --> >=1.2.0, <2.0.0
+func constraintCaret(v *Version, c *constraint) bool {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ if v.LessThan(c.con) {
+ return false
+ }
+
+ if v.Major() != c.con.Major() {
+ return false
+ }
+
+ return true
+}
+
+var constraintRangeRegex *regexp.Regexp
+
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+ `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+func isX(x string) bool {
+ switch x {
+ case "x", "*", "X":
+ return true
+ default:
+ return false
+ }
+}
+
+func rewriteRange(i string) string {
+ m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+ if m == nil {
+ return i
+ }
+ o := i
+ for _, v := range m {
+ t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
+ o = strings.Replace(o, v[0], t, 1)
+ }
+
+ return o
+}
diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go
new file mode 100644
index 0000000..6a6c24c
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/doc.go
@@ -0,0 +1,115 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+ * Parse semantic versions
+ * Sort semantic versions
+ * Check if a semantic version fits within a set of constraints
+ * Optionally work with a `v` prefix
+
+Parsing Semantic Versions
+
+To parse a semantic version use the `NewVersion` function. For example,
+
+ v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+If there is an error the version wasn't parseable. The version object has methods
+to get the parts of the version, compare it to other versions, convert the
+version back into a string, and get the original string. For more details
+please see the documentation at https://godoc.org/github.com/Masterminds/semver.
+
+Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+ raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+ vs := make([]*semver.Version, len(raw))
+ for i, r := range raw {
+ v, err := semver.NewVersion(r)
+ if err != nil {
+ t.Errorf("Error parsing version: %s", err)
+ }
+
+ vs[i] = v
+ }
+
+ sort.Sort(semver.Collection(vs))
+
+Checking Version Constraints
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+ c, err := semver.NewConstraint(">= 1.2.3")
+ if err != nil {
+ // Handle constraint not being parseable.
+ }
+
+ v, err := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parseable.
+ }
+ // Check if the version meets the constraints. The a variable will be true.
+ a := c.Check(v)
+
+Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma-separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+ * `=`: equal (aliased to no operator)
+ * `!=`: not equal
+ * `>`: greater than
+ * `<`: less than
+ * `>=`: greater than or equal to
+ * `<=`: less than or equal to
+
+Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+ * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+ * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
+Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the pack level comparison (see tilde below). For example,
+
+ * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+ * `>= 1.2.x` is equivalent to `>= 1.2.0`
+ * `<= 2.x` is equivalent to `< 3`
+ * `*` is equivalent to `>= 0.0.0`
+
+Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+ * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+ * `~1` is equivalent to `>= 1, < 2`
+ * `~2.3` is equivalent to `>= 2.3, < 2.4`
+ * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+ * `~1.x` is equivalent to `>= 1, < 2`
+
+Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes. This is useful
+when comparing API versions, as a major change is API breaking. For example,
+
+ * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+ * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+ * `^2.3` is equivalent to `>= 2.3, < 3`
+ * `^2.x` is equivalent to `>= 2.0.0, < 3`
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go
new file mode 100644
index 0000000..400d4f9
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/version.go
@@ -0,0 +1,425 @@
+package semver
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+var validPrereleaseRegex *regexp.Regexp
+
+var (
+ // ErrInvalidSemVer is returned when a version is found to be invalid
+ // during parsing.
+ ErrInvalidSemVer = errors.New("Invalid Semantic Version")
+
+ // ErrInvalidMetadata is returned when the metadata is an invalid format
+ ErrInvalidMetadata = errors.New("Invalid Metadata string")
+
+ // ErrInvalidPrerelease is returned when the pre-release is an invalid format
+ ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
+)
+
+// SemVerRegex is the regular expression used to parse a semantic version.
+const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+ `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+// ValidPrerelease is the regular expression which validates
+// both prerelease and metadata values.
+const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$`
+
+// Version represents a single semantic version.
+type Version struct {
+ major, minor, patch int64
+ pre string
+ metadata string
+ original string
+}
+
+func init() {
+ versionRegex = regexp.MustCompile("^" + SemVerRegex + "$")
+ validPrereleaseRegex = regexp.MustCompile(ValidPrerelease)
+}
+
+// NewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version.
+func NewVersion(v string) (*Version, error) {
+ m := versionRegex.FindStringSubmatch(v)
+ if m == nil {
+ return nil, ErrInvalidSemVer
+ }
+
+ sv := &Version{
+ metadata: m[8],
+ pre: m[5],
+ original: v,
+ }
+
+ var temp int64
+ temp, err := strconv.ParseInt(m[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing version segment: %s", err)
+ }
+ sv.major = temp
+
+ if m[2] != "" {
+ temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing version segment: %s", err)
+ }
+ sv.minor = temp
+ } else {
+ sv.minor = 0
+ }
+
+ if m[3] != "" {
+ temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing version segment: %s", err)
+ }
+ sv.patch = temp
+ } else {
+ sv.patch = 0
+ }
+
+ return sv, nil
+}
+
+// MustParse parses a given version and panics on error.
+func MustParse(v string) *Version {
+ sv, err := NewVersion(v)
+ if err != nil {
+ panic(err)
+ }
+ return sv
+}
+
+// String converts a Version object to a string.
+// Note, if the original version contained a leading v this version will not.
+// See the Original() method to retrieve the original value. Semantic Versions
+// don't contain a leading v per the spec. Instead it's optional on
+// implementation.
+func (v *Version) String() string {
+ var buf bytes.Buffer
+
+ fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch)
+ if v.pre != "" {
+ fmt.Fprintf(&buf, "-%s", v.pre)
+ }
+ if v.metadata != "" {
+ fmt.Fprintf(&buf, "+%s", v.metadata)
+ }
+
+ return buf.String()
+}
+
+// Original returns the original value passed in to be parsed.
+func (v *Version) Original() string {
+ return v.original
+}
+
+// Major returns the major version.
+func (v *Version) Major() int64 {
+ return v.major
+}
+
+// Minor returns the minor version.
+func (v *Version) Minor() int64 {
+ return v.minor
+}
+
+// Patch returns the patch version.
+func (v *Version) Patch() int64 {
+ return v.patch
+}
+
+// Prerelease returns the pre-release version.
+func (v *Version) Prerelease() string {
+ return v.pre
+}
+
+// Metadata returns the metadata on the version.
+func (v *Version) Metadata() string {
+ return v.metadata
+}
+
+// originalVPrefix returns the original 'v' prefix if any.
+func (v *Version) originalVPrefix() string {
+
+ // Note, only lowercase v is supported as a prefix by the parser.
+ if v.original != "" && v.original[:1] == "v" {
+ return v.original[:1]
+ }
+ return ""
+}
+
+// IncPatch produces the next patch version.
+// If the current version does not have prerelease/metadata information,
+// it unsets the metadata and prerelease values and increments the patch number.
+// If the current version has prerelease or metadata information,
+// it unsets both values and keeps the current patch value.
+func (v Version) IncPatch() Version {
+ vNext := v
+ // according to http://semver.org/#spec-item-9
+ // Pre-release versions have a lower precedence than the associated normal version.
+ // according to http://semver.org/#spec-item-10
+ // Build metadata SHOULD be ignored when determining version precedence.
+ if v.pre != "" {
+ vNext.metadata = ""
+ vNext.pre = ""
+ } else {
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = v.patch + 1
+ }
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// IncMinor produces the next minor version.
+// Sets patch to 0.
+// Increments minor number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMinor() Version {
+ vNext := v
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = 0
+ vNext.minor = v.minor + 1
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// IncMajor produces the next major version.
+// Sets patch to 0.
+// Sets minor to 0.
+// Increments major number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMajor() Version {
+ vNext := v
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = 0
+ vNext.minor = 0
+ vNext.major = v.major + 1
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
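+
+// A minimal usage sketch (from a client package importing this as semver; the
+// starting version is illustrative):
+//
+//	v := semver.MustParse("1.2.3")
+//	minor := v.IncMinor()
+//	major := v.IncMajor()
+//	minor.String() // "1.3.0"
+//	major.String() // "2.0.0"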
+
+// SetPrerelease defines the prerelease value.
+// Value must not include the required 'hyphen' prefix.
+func (v Version) SetPrerelease(prerelease string) (Version, error) {
+ vNext := v
+ if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) {
+ return vNext, ErrInvalidPrerelease
+ }
+ vNext.pre = prerelease
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext, nil
+}
+
+// SetMetadata defines metadata value.
+// Value must not include the required 'plus' prefix.
+func (v Version) SetMetadata(metadata string) (Version, error) {
+ vNext := v
+ if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) {
+ return vNext, ErrInvalidMetadata
+ }
+ vNext.metadata = metadata
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext, nil
+}
+
+// LessThan tests if one version is less than another one.
+func (v *Version) LessThan(o *Version) bool {
+ return v.Compare(o) < 0
+}
+
+// GreaterThan tests if one version is greater than another one.
+func (v *Version) GreaterThan(o *Version) bool {
+ return v.Compare(o) > 0
+}
+
+// Equal tests if two versions are equal to each other.
+// Note, versions can be equal with different metadata since metadata
+// is not considered part of the comparable version.
+func (v *Version) Equal(o *Version) bool {
+ return v.Compare(o) == 0
+}
+
+// Compare compares this version to another one. It returns -1, 0, or 1 if
+// this version is smaller than, equal to, or larger than the other version.
+//
+// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
+// lower than the version without a prerelease.
+func (v *Version) Compare(o *Version) int {
+ // Compare the major, minor, and patch version for differences. If a
+ // difference is found return the comparison.
+ if d := compareSegment(v.Major(), o.Major()); d != 0 {
+ return d
+ }
+ if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+ return d
+ }
+ if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+ return d
+ }
+
+ // At this point the major, minor, and patch versions are the same.
+ ps := v.pre
+ po := o.Prerelease()
+
+ if ps == "" && po == "" {
+ return 0
+ }
+ if ps == "" {
+ return 1
+ }
+ if po == "" {
+ return -1
+ }
+
+ return comparePrerelease(ps, po)
+}
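+
+// A minimal comparison sketch (from a client package importing this as semver):
+//
+//	a := semver.MustParse("1.2.3-beta.1")
+//	b := semver.MustParse("1.2.3")
+//	a.Compare(b) // -1: the pre-release sorts below its release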
+
+// UnmarshalJSON implements JSON.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ temp, err := NewVersion(s)
+ if err != nil {
+ return err
+ }
+ v.major = temp.major
+ v.minor = temp.minor
+ v.patch = temp.patch
+ v.pre = temp.pre
+ v.metadata = temp.metadata
+ v.original = temp.original
+ temp = nil
+ return nil
+}
+
+// MarshalJSON implements JSON.Marshaler interface.
+func (v *Version) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+func compareSegment(v, o int64) int {
+ if v < o {
+ return -1
+ }
+ if v > o {
+ return 1
+ }
+
+ return 0
+}
+
+func comparePrerelease(v, o string) int {
+
+ // split the prerelease versions into their parts. The separator, per the spec,
+ // is a .
+ sparts := strings.Split(v, ".")
+ oparts := strings.Split(o, ".")
+
+ // Find the longer length of the parts to know how many loop iterations to
+ // go through.
+ slen := len(sparts)
+ olen := len(oparts)
+
+ l := slen
+ if olen > slen {
+ l = olen
+ }
+
+ // Iterate over each part of the prereleases to compare the differences.
+ for i := 0; i < l; i++ {
+ // Since the length of the parts can be different we need to create
+ // a placeholder. This is to avoid out of bounds issues.
+ stemp := ""
+ if i < slen {
+ stemp = sparts[i]
+ }
+
+ otemp := ""
+ if i < olen {
+ otemp = oparts[i]
+ }
+
+ d := comparePrePart(stemp, otemp)
+ if d != 0 {
+ return d
+ }
+ }
+
+ // Reaching here means every compared prerelease part was equal, so the two
+ // prerelease strings have equal precedence.
+ return 0
+}
+
+func comparePrePart(s, o string) int {
+ // Fastpath if they are equal
+ if s == o {
+ return 0
+ }
+
+ // When s or o are empty we can use the other in an attempt to determine
+ // the response.
+ if s == "" {
+ if o != "" {
+ return -1
+ }
+ return 1
+ }
+
+ if o == "" {
+ if s != "" {
+ return 1
+ }
+ return -1
+ }
+
+ // When comparing as strings, "99" is greater than "103". To handle
+ // cases like this we need to detect numbers and compare them. According
+ // to the semver spec, numbers are always positive. If there is a - at the
+ // start, like -99, it is to be evaluated as an alphanum. Numeric identifiers
+ // always have lower precedence than alphanumeric ones. Parsing as Uints
+ // because negative numbers are ignored.
+
+ oi, n1 := strconv.ParseUint(o, 10, 64)
+ si, n2 := strconv.ParseUint(s, 10, 64)
+
+ // When both are strings, compare them lexically
+ if n1 != nil && n2 != nil {
+ if s > o {
+ return 1
+ }
+ return -1
+ } else if n1 != nil {
+ // o is a string and s is a number
+ return -1
+ } else if n2 != nil {
+ // s is a string and o is a number
+ return 1
+ }
+ // Both are numbers
+ if si > oi {
+ return 1
+ }
+ return -1
+
+}
diff --git a/vendor/github.com/Masterminds/semver/version_fuzz.go b/vendor/github.com/Masterminds/semver/version_fuzz.go
new file mode 100644
index 0000000..b42bcd6
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/version_fuzz.go
@@ -0,0 +1,10 @@
+// +build gofuzz
+
+package semver
+
+func Fuzz(data []byte) int {
+ if _, err := NewVersion(string(data)); err != nil {
+ return 0
+ }
+ return 1
+}
diff --git a/vendor/github.com/Masterminds/sprig/.gitignore b/vendor/github.com/Masterminds/sprig/.gitignore
new file mode 100644
index 0000000..5e3002f
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/.gitignore
@@ -0,0 +1,2 @@
+vendor/
+/.glide
diff --git a/vendor/github.com/Masterminds/sprig/.travis.yml b/vendor/github.com/Masterminds/sprig/.travis.yml
new file mode 100644
index 0000000..b9da8b8
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/.travis.yml
@@ -0,0 +1,26 @@
+language: go
+
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+# Setting sudo access to false will let Travis CI use containers rather than
+# VMs to run the tests. For more details see:
+# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/
+# - http://docs.travis-ci.com/user/workers/standard-infrastructure/
+sudo: false
+
+script:
+ - make setup test
+
+notifications:
+ webhooks:
+ urls:
+ - https://webhooks.gitter.im/e/06e3328629952dabe3e0
+ on_success: change # options: [always|never|change] default: always
+ on_failure: always # options: [always|never|change] default: always
+ on_start: never # options: [always|never|change] default: always
diff --git a/vendor/github.com/Masterminds/sprig/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/CHANGELOG.md
new file mode 100644
index 0000000..6a79fbd
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/CHANGELOG.md
@@ -0,0 +1,282 @@
+# Changelog
+
+## Release 2.22.0 (2019-10-02)
+
+### Added
+
+- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
+- #195: Added deepCopy function for use with dicts
+
+### Changed
+
+- Updated merge and mergeOverwrite documentation to explain copying and how to
+ use deepCopy with it
+
+## Release 2.21.0 (2019-09-18)
+
+### Added
+
+- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
+- #128: Added toDecimal support (thanks @Dean-Coakley)
+- #169: Added list concat (thanks @astorath)
+- #174: Added deepEqual function (thanks @bonifaido)
+- #170: Added url parse and join functions (thanks @astorath)
+
+### Changed
+
+- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify
+
+### Fixed
+
+- #172: Fix semver wildcard example (thanks @piepmatz)
+- #175: Fix dateInZone doc example (thanks @s3than)
+
+## Release 2.20.0 (2019-06-18)
+
+### Added
+
+- #164: Adding function to get unix epoch for a time (@mattfarina)
+- #166: Adding tests for date_in_zone (@mattfarina)
+
+### Changed
+
+- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
+- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
+- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan)
+
+### Fixed
+
+## Release 2.19.0 (2019-03-02)
+
+IMPORTANT: This release reverts a change from 2.18.0
+
+In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random.
+
+We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
+
+### Changed
+
+- Fix substr panic 35fb796 (Alexey igrychev)
+- Remove extra period 1eb7729 (Matthew Lorimor)
+- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
+- README edits/fixes/suggestions 08fe136 (Lauri Apple)
+
+
+## Release 2.18.0 (2019-02-12)
+
+### Added
+
+- Added mergeOverwrite function
+- cryptographic functions that use secure random (see fe1de12)
+
+### Changed
+
+- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
+- Handle has for nil list 9c10885 (Daniel Cohen)
+- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
+- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
+- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
+- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
+- Handle untyped nil values as parameters to string functions 2b2ec8f (Morten Torkildsen)
+
+### Fixed
+
+- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
+- Fix substr var names and comments d581f80 (Dean Coakley)
+- Fix substr documentation 2737203 (Dean Coakley)
+
+## Release 2.17.1 (2019-01-03)
+
+### Fixed
+
+The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
+
+## Release 2.17.0 (2019-01-03)
+
+### Added
+
+- adds alder32sum function and test 6908fc2 (marshallford)
+- Added kebabcase function ca331a1 (Ilyes512)
+
+### Changed
+
+- Update goutils to 1.1.0 4e1125d (Matt Butcher)
+
+### Fixed
+
+- Fix 'has' documentation e3f2a85 (dean-coakley)
+- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
+- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
+
+## Release 2.16.0 (2018-08-13)
+
+### Added
+
+- add splitn function fccb0b0 (Helgi Þorbjörnsson)
+- Add slice func df28ca7 (gongdo)
+- Generate serial number a3bdffd (Cody Coons)
+- Extract values of dict with values function df39312 (Lawrence Jones)
+
+### Changed
+
+- Modify panic message for list.slice ae38335 (gongdo)
+- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
+- Remove duplicated documentation 1d97af1 (Matthew Fisher)
+- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
+
+### Fixed
+
+- Fix file permissions c5f40b5 (gongdo)
+- Fix example for buildCustomCert 7779e0d (Tin Lam)
+
+## Release 2.15.0 (2018-04-02)
+
+### Added
+
+- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
+- #66: Add ternary function (thanks @binoculars)
+- #67: Allow keys function to take multiple dicts (thanks @binoculars)
+- #89: Added sha1sum to crypto function (thanks @benkeil)
+- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei)
+- #92: Add travis testing for go 1.10
+- #93: Adding appveyor config for windows testing
+
+### Changed
+
+- #90: Updating to more recent dependencies
+- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
+
+### Fixed
+
+- #76: Fixed documentation typos (thanks @Thiht)
+- Fixed rounding issue on the `ago` function. Note, this removes support for Go 1.8 and older
+
+## Release 2.14.1 (2017-12-01)
+
+### Fixed
+
+- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
+- #61: Removing line with {{ due to blocking github pages generation
+- #64: Update the list functions to handle int, string, and other slices for compatibility
+
+## Release 2.14.0 (2017-10-06)
+
+This new version of Sprig adds a set of functions for generating and working with SSL certificates.
+
+- `genCA` generates an SSL Certificate Authority
+- `genSelfSignedCert` generates an SSL self-signed certificate
+- `genSignedCert` generates an SSL certificate and key based on a given CA
+
+## Release 2.13.0 (2017-09-18)
+
+This release adds new functions, including:
+
+- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
+- `floor`, `ceil`, and `round` math functions
+- `toDate` converts a string to a date
+- `nindent` is just like `indent` but also prepends a new line
+- `ago` returns the time from `time.Now`
+
+### Added
+
+- #40: Added basic regex functionality (thanks @alanquillin)
+- #41: Added ceil floor and round functions (thanks @alanquillin)
+- #48: Added toDate function (thanks @andreynering)
+- #50: Added nindent function (thanks @binoculars)
+- #46: Added ago function (thanks @slayer)
+
+### Changed
+
+- #51: Updated godocs to include new string functions (thanks @curtisallen)
+- #49: Added ability to merge multiple dicts (thanks @binoculars)
+
+## Release 2.12.0 (2017-05-17)
+
+- `snakecase`, `camelcase`, and `shuffle` are three new string functions
+- `fail` allows you to bail out of a template render when conditions are not met
+
+## Release 2.11.0 (2017-05-02)
+
+- Added `toJson` and `toPrettyJson`
+- Added `merge`
+- Refactored documentation
+
+## Release 2.10.0 (2017-03-15)
+
+- Added `semver` and `semverCompare` for Semantic Versions
+- `list` replaces `tuple`
+- Fixed issue with `join`
+- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
+
+## Release 2.9.0 (2017-02-23)
+
+- Added `splitList` to split a list
+- Added crypto functions of `genPrivateKey` and `derivePassword`
+
+## Release 2.8.0 (2016-12-21)
+
+- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
+- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
+
+## Release 2.7.0 (2016-12-01)
+
+- Added `sha256sum` to generate a hash of an input
+- Added functions to convert a numeric or string to `int`, `int64`, `float64`
+
+## Release 2.6.0 (2016-10-03)
+
+- Added a `uuidv4` template function for generating UUIDs inside of a template.
+
+## Release 2.5.0 (2016-08-19)
+
+- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
+- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
+- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
+
+## Release 2.4.0 (2016-08-16)
+
+- Adds two functions: `until` and `untilStep`
+
+## Release 2.3.0 (2016-06-21)
+
+- cat: Concatenate strings with whitespace separators.
+- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
+- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
+- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
+
+## Release 2.2.0 (2016-04-21)
+
+- Added a `genPrivateKey` function (Thanks @bacongobbler)
+
+## Release 2.1.0 (2016-03-30)
+
+- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
+- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
+
+## Release 2.0.0 (2016-03-29)
+
+Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.
+
+- `min` complements `max` (formerly `biggest`)
+- `empty` indicates that a value is the empty value for its type
+- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}`
+- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
+- Date formatters have been added for HTML dates (as used in `date` input fields)
+- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
+
+## Release 1.2.0 (2016-02-01)
+
+- Added quote and squote
+- Added b32enc and b32dec
+- add now takes varargs
+- biggest now takes varargs
+
+## Release 1.1.0 (2015-12-29)
+
+- Added #4: Added contains function. strings.Contains, but with the arguments
+ switched to simplify common pipelines. (thanks krancour)
+- Added Travis-CI testing support
+
+## Release 1.0.0 (2015-12-23)
+
+- Initial release
diff --git a/vendor/github.com/Masterminds/sprig/LICENSE.txt b/vendor/github.com/Masterminds/sprig/LICENSE.txt
new file mode 100644
index 0000000..5c95acc
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/LICENSE.txt
@@ -0,0 +1,20 @@
+Sprig
+Copyright (C) 2013 Masterminds
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/sprig/Makefile b/vendor/github.com/Masterminds/sprig/Makefile
new file mode 100644
index 0000000..63a93fd
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/Makefile
@@ -0,0 +1,13 @@
+
+HAS_GLIDE := $(shell command -v glide;)
+
+.PHONY: test
+test:
+ go test -v .
+
+.PHONY: setup
+setup:
+ifndef HAS_GLIDE
+ go get -u github.com/Masterminds/glide
+endif
+ glide install
diff --git a/vendor/github.com/Masterminds/sprig/README.md b/vendor/github.com/Masterminds/sprig/README.md
new file mode 100644
index 0000000..b705695
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/README.md
@@ -0,0 +1,78 @@
+# Sprig: Template functions for Go templates
+[Stability: sustained](https://masterminds.github.io/stability/sustained.html)
+[Build Status](https://travis-ci.org/Masterminds/sprig)
+
+The Go language comes with a [built-in template
+language](http://golang.org/pkg/text/template/), but not
+very many template functions. Sprig is a library that provides more than 100 commonly
+used template functions.
+
+It is inspired by the template functions found in
+[Twig](http://twig.sensiolabs.org/documentation) and in various
+JavaScript libraries, such as [underscore.js](http://underscorejs.org/).
+
+## Usage
+
+**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for
+detailed instructions and code snippets for the >100 template functions available.
+
+**Go developers**: If you'd like to include Sprig as a library in your program,
+our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig).
+
+For standard usage, read on.
+
+### Load the Sprig library
+
+To load the Sprig `FuncMap`:
+
+```go
+
+import (
+ "github.com/Masterminds/sprig"
+ "html/template"
+)
+
+// This example illustrates that the FuncMap *must* be set before the
+// templates themselves are loaded.
+tpl := template.Must(
+ template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html")
+)
+
+
+```
+
+### Calling the functions inside of templates
+
+By convention, all functions are lowercase. This seems to follow the Go
+idiom for template functions (as opposed to template methods, which are
+TitleCase). For example, this:
+
+```
+{{ "hello!" | upper | repeat 5 }}
+```
+
+produces this:
+
+```
+HELLO!HELLO!HELLO!HELLO!HELLO!
+```
+
+## Principles Driving Our Function Selection
+
+We followed these principles to decide which functions to add and how to implement them:
+
+- Use template functions to build layout. The following
+ types of operations are within the domain of template functions:
+ - Formatting
+ - Layout
+ - Simple type conversions
+ - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
+- Template functions should not return errors unless there is no way to print
+ a sensible value. For example, converting a string to an integer should not
+ produce an error if conversion fails. Instead, it should display a default
+ value.
+- Simple math is necessary for grid layouts, pagers, and so on. Complex math
+ (anything other than arithmetic) should be done outside of templates.
+- Template functions only deal with the data passed into them. They never retrieve
+ data from a source.
+- Finally, do not override core Go template functions.
diff --git a/vendor/github.com/Masterminds/sprig/appveyor.yml b/vendor/github.com/Masterminds/sprig/appveyor.yml
new file mode 100644
index 0000000..d545a98
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/appveyor.yml
@@ -0,0 +1,26 @@
+
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\Masterminds\sprig
+shallow_clone: true
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+install:
+ - go get -u github.com/Masterminds/glide
+ - set PATH=%GOPATH%\bin;%PATH%
+ - go version
+ - go env
+
+build_script:
+ - glide install
+ - go install ./...
+
+test_script:
+ - go test -v
+
+deploy: off
diff --git a/vendor/github.com/Masterminds/sprig/crypto.go b/vendor/github.com/Masterminds/sprig/crypto.go
new file mode 100644
index 0000000..7a418ba
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/crypto.go
@@ -0,0 +1,502 @@
+package sprig
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "hash/adler32"
+ "math/big"
+ "net"
+ "time"
+
+ "github.com/google/uuid"
+ "golang.org/x/crypto/scrypt"
+)
+
+func sha256sum(input string) string {
+ hash := sha256.Sum256([]byte(input))
+ return hex.EncodeToString(hash[:])
+}
+
+func sha1sum(input string) string {
+ hash := sha1.Sum([]byte(input))
+ return hex.EncodeToString(hash[:])
+}
+
+func adler32sum(input string) string {
+ hash := adler32.Checksum([]byte(input))
+ return fmt.Sprintf("%d", hash)
+}
+
+// uuidv4 provides a safe and secure UUID v4 implementation
+func uuidv4() string {
+ return fmt.Sprintf("%s", uuid.New())
+}
+
+var master_password_seed = "com.lyndir.masterpassword"
+
+var password_type_templates = map[string][][]byte{
+ "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")},
+ "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"),
+ []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"),
+ []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"),
+ []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"),
+ []byte("CvccCvcvCvccno")},
+ "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")},
+ "short": {[]byte("Cvcn")},
+ "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")},
+ "pin": {[]byte("nnnn")},
+}
+
+var template_characters = map[byte]string{
+ 'V': "AEIOU",
+ 'C': "BCDFGHJKLMNPQRSTVWXYZ",
+ 'v': "aeiou",
+ 'c': "bcdfghjklmnpqrstvwxyz",
+ 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ",
+ 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz",
+ 'n': "0123456789",
+ 'o': "@&%?,=[]_:-+*$#!'^~;()/.",
+ 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()",
+}
+
+func derivePassword(counter uint32, password_type, password, user, site string) string {
+ var templates = password_type_templates[password_type]
+ if templates == nil {
+ return fmt.Sprintf("cannot find password template %s", password_type)
+ }
+
+ var buffer bytes.Buffer
+ buffer.WriteString(master_password_seed)
+ binary.Write(&buffer, binary.BigEndian, uint32(len(user)))
+ buffer.WriteString(user)
+
+ salt := buffer.Bytes()
+ key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64)
+ if err != nil {
+ return fmt.Sprintf("failed to derive password: %s", err)
+ }
+
+ buffer.Truncate(len(master_password_seed))
+ binary.Write(&buffer, binary.BigEndian, uint32(len(site)))
+ buffer.WriteString(site)
+ binary.Write(&buffer, binary.BigEndian, counter)
+
+ var hmacv = hmac.New(sha256.New, key)
+ hmacv.Write(buffer.Bytes())
+ var seed = hmacv.Sum(nil)
+ var temp = templates[int(seed[0])%len(templates)]
+
+ buffer.Truncate(0)
+ for i, element := range temp {
+ pass_chars := template_characters[element]
+ pass_char := pass_chars[int(seed[i+1])%len(pass_chars)]
+ buffer.WriteByte(pass_char)
+ }
+
+ return buffer.String()
+}
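
A hedged usage sketch (illustrative, not part of the patch): `derivePassword` is only reachable through Sprig's template function map, so the snippet below drives it via `text/template`. The argument order (counter, type, master password, user, site) follows the signature above; the values are placeholders.

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// Derives a deterministic, site-specific password from a master password.
	const src = `{{ derivePassword 1 "long" "password" "user" "example.com" }}`
	tpl := template.Must(template.New("pw").Funcs(sprig.TxtFuncMap()).Parse(src))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```
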
+
+func generatePrivateKey(typ string) string {
+ var priv interface{}
+ var err error
+ switch typ {
+ case "", "rsa":
+ // good enough for government work
+ priv, err = rsa.GenerateKey(rand.Reader, 4096)
+ case "dsa":
+ key := new(dsa.PrivateKey)
+ // again, good enough for government work
+ if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil {
+ return fmt.Sprintf("failed to generate dsa params: %s", err)
+ }
+ err = dsa.GenerateKey(key, rand.Reader)
+ priv = key
+ case "ecdsa":
+ // again, good enough for government work
+ priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ default:
+ return "Unknown type " + typ
+ }
+ if err != nil {
+ return fmt.Sprintf("failed to generate private key: %s", err)
+ }
+
+ return string(pem.EncodeToMemory(pemBlockForKey(priv)))
+}
+
+type DSAKeyFormat struct {
+ Version int
+ P, Q, G, Y, X *big.Int
+}
+
+func pemBlockForKey(priv interface{}) *pem.Block {
+ switch k := priv.(type) {
+ case *rsa.PrivateKey:
+ return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}
+ case *dsa.PrivateKey:
+ val := DSAKeyFormat{
+ P: k.P, Q: k.Q, G: k.G,
+ Y: k.Y, X: k.X,
+ }
+ bytes, _ := asn1.Marshal(val)
+ return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes}
+ case *ecdsa.PrivateKey:
+ b, _ := x509.MarshalECPrivateKey(k)
+ return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
+ default:
+ return nil
+ }
+}
+
+type certificate struct {
+ Cert string
+ Key string
+}
+
+func buildCustomCertificate(b64cert string, b64key string) (certificate, error) {
+ crt := certificate{}
+
+ cert, err := base64.StdEncoding.DecodeString(b64cert)
+ if err != nil {
+ return crt, errors.New("unable to decode base64 certificate")
+ }
+
+ key, err := base64.StdEncoding.DecodeString(b64key)
+ if err != nil {
+ return crt, errors.New("unable to decode base64 private key")
+ }
+
+ decodedCert, _ := pem.Decode(cert)
+ if decodedCert == nil {
+ return crt, errors.New("unable to decode certificate")
+ }
+ _, err = x509.ParseCertificate(decodedCert.Bytes)
+ if err != nil {
+ return crt, fmt.Errorf(
+ "error parsing certificate: decodedCert.Bytes: %s",
+ err,
+ )
+ }
+
+ decodedKey, _ := pem.Decode(key)
+ if decodedKey == nil {
+ return crt, errors.New("unable to decode key")
+ }
+ _, err = x509.ParsePKCS1PrivateKey(decodedKey.Bytes)
+ if err != nil {
+ return crt, fmt.Errorf(
+ "error parsing prive key: decodedKey.Bytes: %s",
+ err,
+ )
+ }
+
+ crt.Cert = string(cert)
+ crt.Key = string(key)
+
+ return crt, nil
+}
+
+func generateCertificateAuthority(
+ cn string,
+ daysValid int,
+) (certificate, error) {
+ ca := certificate{}
+
+ template, err := getBaseCertTemplate(cn, nil, nil, daysValid)
+ if err != nil {
+ return ca, err
+ }
+ // Override KeyUsage and IsCA
+ template.KeyUsage = x509.KeyUsageKeyEncipherment |
+ x509.KeyUsageDigitalSignature |
+ x509.KeyUsageCertSign
+ template.IsCA = true
+
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return ca, fmt.Errorf("error generating rsa key: %s", err)
+ }
+
+ ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv)
+ if err != nil {
+ return ca, err
+ }
+
+ return ca, nil
+}
+
+func generateSelfSignedCertificate(
+ cn string,
+ ips []interface{},
+ alternateDNS []interface{},
+ daysValid int,
+) (certificate, error) {
+ cert := certificate{}
+
+ template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid)
+ if err != nil {
+ return cert, err
+ }
+
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return cert, fmt.Errorf("error generating rsa key: %s", err)
+ }
+
+ cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv)
+ if err != nil {
+ return cert, err
+ }
+
+ return cert, nil
+}
+
+func generateSignedCertificate(
+ cn string,
+ ips []interface{},
+ alternateDNS []interface{},
+ daysValid int,
+ ca certificate,
+) (certificate, error) {
+ cert := certificate{}
+
+ decodedSignerCert, _ := pem.Decode([]byte(ca.Cert))
+ if decodedSignerCert == nil {
+ return cert, errors.New("unable to decode certificate")
+ }
+ signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes)
+ if err != nil {
+ return cert, fmt.Errorf(
+ "error parsing certificate: decodedSignerCert.Bytes: %s",
+ err,
+ )
+ }
+ decodedSignerKey, _ := pem.Decode([]byte(ca.Key))
+ if decodedSignerKey == nil {
+ return cert, errors.New("unable to decode key")
+ }
+ signerKey, err := x509.ParsePKCS1PrivateKey(decodedSignerKey.Bytes)
+ if err != nil {
+ return cert, fmt.Errorf(
+ "error parsing prive key: decodedSignerKey.Bytes: %s",
+ err,
+ )
+ }
+
+ template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid)
+ if err != nil {
+ return cert, err
+ }
+
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return cert, fmt.Errorf("error generating rsa key: %s", err)
+ }
+
+ cert.Cert, cert.Key, err = getCertAndKey(
+ template,
+ priv,
+ signerCert,
+ signerKey,
+ )
+ if err != nil {
+ return cert, err
+ }
+
+ return cert, nil
+}
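
To show how the three certificate helpers compose (an editorial sketch, not part of the vendored file): `genCA` produces a self-signed CA, and `genSignedCert` uses it to sign a leaf certificate. IPs and alternate DNS names are passed as lists, matching the `[]interface{}` parameters above; the hostnames here are placeholders.

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// Generate a CA, sign a leaf certificate with it, and print the PEM-encoded cert.
	const src = `{{ $ca := genCA "example-ca" 365 -}}
{{ $crt := genSignedCert "example.com" (list "10.0.0.1") (list "internal.example.com") 365 $ca -}}
{{ $crt.Cert }}`
	tpl := template.Must(template.New("pki").Funcs(sprig.TxtFuncMap()).Parse(src))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```
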
+
+func getCertAndKey(
+ template *x509.Certificate,
+ signeeKey *rsa.PrivateKey,
+ parent *x509.Certificate,
+ signingKey *rsa.PrivateKey,
+) (string, string, error) {
+ derBytes, err := x509.CreateCertificate(
+ rand.Reader,
+ template,
+ parent,
+ &signeeKey.PublicKey,
+ signingKey,
+ )
+ if err != nil {
+ return "", "", fmt.Errorf("error creating certificate: %s", err)
+ }
+
+ certBuffer := bytes.Buffer{}
+ if err := pem.Encode(
+ &certBuffer,
+ &pem.Block{Type: "CERTIFICATE", Bytes: derBytes},
+ ); err != nil {
+ return "", "", fmt.Errorf("error pem-encoding certificate: %s", err)
+ }
+
+ keyBuffer := bytes.Buffer{}
+ if err := pem.Encode(
+ &keyBuffer,
+ &pem.Block{
+ Type: "RSA PRIVATE KEY",
+ Bytes: x509.MarshalPKCS1PrivateKey(signeeKey),
+ },
+ ); err != nil {
+ return "", "", fmt.Errorf("error pem-encoding key: %s", err)
+ }
+
+ return string(certBuffer.Bytes()), string(keyBuffer.Bytes()), nil
+}
+
+func getBaseCertTemplate(
+ cn string,
+ ips []interface{},
+ alternateDNS []interface{},
+ daysValid int,
+) (*x509.Certificate, error) {
+ ipAddresses, err := getNetIPs(ips)
+ if err != nil {
+ return nil, err
+ }
+ dnsNames, err := getAlternateDNSStrs(alternateDNS)
+ if err != nil {
+ return nil, err
+ }
+ serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128)
+ serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound)
+ if err != nil {
+ return nil, err
+ }
+ return &x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: pkix.Name{
+ CommonName: cn,
+ },
+ IPAddresses: ipAddresses,
+ DNSNames: dnsNames,
+ NotBefore: time.Now(),
+ NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)),
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+ ExtKeyUsage: []x509.ExtKeyUsage{
+ x509.ExtKeyUsageServerAuth,
+ x509.ExtKeyUsageClientAuth,
+ },
+ BasicConstraintsValid: true,
+ }, nil
+}
+
+func getNetIPs(ips []interface{}) ([]net.IP, error) {
+ if ips == nil {
+ return []net.IP{}, nil
+ }
+ var ipStr string
+ var ok bool
+ var netIP net.IP
+ netIPs := make([]net.IP, len(ips))
+ for i, ip := range ips {
+ ipStr, ok = ip.(string)
+ if !ok {
+ return nil, fmt.Errorf("error parsing ip: %v is not a string", ip)
+ }
+ netIP = net.ParseIP(ipStr)
+ if netIP == nil {
+ return nil, fmt.Errorf("error parsing ip: %s", ipStr)
+ }
+ netIPs[i] = netIP
+ }
+ return netIPs, nil
+}
+
+func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) {
+ if alternateDNS == nil {
+ return []string{}, nil
+ }
+ var dnsStr string
+ var ok bool
+ alternateDNSStrs := make([]string, len(alternateDNS))
+ for i, dns := range alternateDNS {
+ dnsStr, ok = dns.(string)
+ if !ok {
+ return nil, fmt.Errorf(
+ "error processing alternate dns name: %v is not a string",
+ dns,
+ )
+ }
+ alternateDNSStrs[i] = dnsStr
+ }
+ return alternateDNSStrs, nil
+}
+
+func encryptAES(password string, plaintext string) (string, error) {
+ if plaintext == "" {
+ return "", nil
+ }
+
+ key := make([]byte, 32)
+ copy(key, []byte(password))
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return "", err
+ }
+
+ content := []byte(plaintext)
+ blockSize := block.BlockSize()
+ padding := blockSize - len(content)%blockSize
+ padtext := bytes.Repeat([]byte{byte(padding)}, padding)
+ content = append(content, padtext...)
+
+ ciphertext := make([]byte, aes.BlockSize+len(content))
+
+ iv := ciphertext[:aes.BlockSize]
+ if _, err := io.ReadFull(rand.Reader, iv); err != nil {
+ return "", err
+ }
+
+ mode := cipher.NewCBCEncrypter(block, iv)
+ mode.CryptBlocks(ciphertext[aes.BlockSize:], content)
+
+ return base64.StdEncoding.EncodeToString(ciphertext), nil
+}
+
+func decryptAES(password string, crypt64 string) (string, error) {
+ if crypt64 == "" {
+ return "", nil
+ }
+
+ key := make([]byte, 32)
+ copy(key, []byte(password))
+
+ crypt, err := base64.StdEncoding.DecodeString(crypt64)
+ if err != nil {
+ return "", err
+ }
+
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return "", err
+ }
+
+ iv := crypt[:aes.BlockSize]
+ crypt = crypt[aes.BlockSize:]
+ decrypted := make([]byte, len(crypt))
+ mode := cipher.NewCBCDecrypter(block, iv)
+ mode.CryptBlocks(decrypted, crypt)
+
+ return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil
+}
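
A quick round-trip sketch (illustrative only): `encryptAES` pads the plaintext, encrypts it with AES-256 CBC under a zero-padded 32-byte key derived from the password, and base64-encodes the result, so piping straight into `decryptAES` with the same password recovers the original string.

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// Encrypt and immediately decrypt with the same password; prints "attack at dawn".
	const src = `{{ "attack at dawn" | encryptAES "secret key" | decryptAES "secret key" }}`
	tpl := template.Must(template.New("aes").Funcs(sprig.TxtFuncMap()).Parse(src))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```
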
diff --git a/vendor/github.com/Masterminds/sprig/date.go b/vendor/github.com/Masterminds/sprig/date.go
new file mode 100644
index 0000000..d1d6155
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/date.go
@@ -0,0 +1,83 @@
+package sprig
+
+import (
+ "strconv"
+ "time"
+)
+
+// Given a format and a date, format the date string.
+//
+// Date can be a `time.Time` or an `int, int32, int64`.
+// In the latter case, it is treated as seconds since UNIX
+// epoch.
+func date(fmt string, date interface{}) string {
+ return dateInZone(fmt, date, "Local")
+}
+
+func htmlDate(date interface{}) string {
+ return dateInZone("2006-01-02", date, "Local")
+}
+
+func htmlDateInZone(date interface{}, zone string) string {
+ return dateInZone("2006-01-02", date, zone)
+}
+
+func dateInZone(fmt string, date interface{}, zone string) string {
+ var t time.Time
+ switch date := date.(type) {
+ default:
+ t = time.Now()
+ case time.Time:
+ t = date
+ case *time.Time:
+ t = *date
+ case int64:
+ t = time.Unix(date, 0)
+ case int:
+ t = time.Unix(int64(date), 0)
+ case int32:
+ t = time.Unix(int64(date), 0)
+ }
+
+ loc, err := time.LoadLocation(zone)
+ if err != nil {
+ loc, _ = time.LoadLocation("UTC")
+ }
+
+ return t.In(loc).Format(fmt)
+}
+
+func dateModify(fmt string, date time.Time) time.Time {
+ d, err := time.ParseDuration(fmt)
+ if err != nil {
+ return date
+ }
+ return date.Add(d)
+}
+
+func dateAgo(date interface{}) string {
+ var t time.Time
+
+ switch date := date.(type) {
+ default:
+ t = time.Now()
+ case time.Time:
+ t = date
+ case int64:
+ t = time.Unix(date, 0)
+ case int:
+ t = time.Unix(int64(date), 0)
+ }
+ // Drop resolution to seconds
+ duration := time.Since(t).Round(time.Second)
+ return duration.String()
+}
+
+func toDate(fmt, str string) time.Time {
+ t, _ := time.ParseInLocation(fmt, str, time.Local)
+ return t
+}
+
+func unixEpoch(date time.Time) string {
+ return strconv.FormatInt(date.Unix(), 10)
+}
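
A small usage sketch for the date helpers (not part of the vendored file): `date` and `dateInZone` take the format first so a time can be piped or passed in, and `unixEpoch` accepts a `time.Time` such as the one returned by `now`.

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// Format the current time locally, in UTC, and as seconds since the epoch.
	const src = `{{ now | date "2006-01-02" }} {{ dateInZone "15:04" (now) "UTC" }} {{ now | unixEpoch }}`
	tpl := template.Must(template.New("dates").Funcs(sprig.TxtFuncMap()).Parse(src))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```
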
diff --git a/vendor/github.com/Masterminds/sprig/defaults.go b/vendor/github.com/Masterminds/sprig/defaults.go
new file mode 100644
index 0000000..ed6a8ab
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/defaults.go
@@ -0,0 +1,83 @@
+package sprig
+
+import (
+ "encoding/json"
+ "reflect"
+)
+
+// dfault checks whether `given` is set, and returns default if not set.
+//
+// This returns `d` if `given` appears not to be set, and `given` otherwise.
+//
+// For numeric types 0 is unset.
+// For strings, maps, arrays, and slices, len() = 0 is considered unset.
+// For bool, false is unset.
+// Structs are never considered unset.
+//
+// For everything else, including pointers, a nil value is unset.
+func dfault(d interface{}, given ...interface{}) interface{} {
+
+ if empty(given) || empty(given[0]) {
+ return d
+ }
+ return given[0]
+}
+
+// empty returns true if the given value has the zero value for its type.
+func empty(given interface{}) bool {
+ g := reflect.ValueOf(given)
+ if !g.IsValid() {
+ return true
+ }
+
+ // Basically adapted from text/template.isTrue
+ switch g.Kind() {
+ default:
+ return g.IsNil()
+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+ return g.Len() == 0
+ case reflect.Bool:
+ return g.Bool() == false
+ case reflect.Complex64, reflect.Complex128:
+ return g.Complex() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return g.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return g.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return g.Float() == 0
+ case reflect.Struct:
+ return false
+ }
+}
+
+// coalesce returns the first non-empty value.
+func coalesce(v ...interface{}) interface{} {
+ for _, val := range v {
+ if !empty(val) {
+ return val
+ }
+ }
+ return nil
+}
+
+// toJson encodes an item into a JSON string
+func toJson(v interface{}) string {
+ output, _ := json.Marshal(v)
+ return string(output)
+}
+
+// toPrettyJson encodes an item into a pretty (indented) JSON string
+func toPrettyJson(v interface{}) string {
+ output, _ := json.MarshalIndent(v, "", " ")
+ return string(output)
+}
+
+// ternary returns the first value if the last value is true, otherwise returns the second value.
+func ternary(vt interface{}, vf interface{}, v bool) interface{} {
+ if v {
+ return vt
+ }
+
+ return vf
+}
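
A brief template sketch of these helpers (illustrative only): empty values fall through to `default`, `coalesce` picks the first non-empty argument, and `ternary` selects based on a boolean.

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// Prints: fallback first-non-empty yes
	const src = `{{ "" | default "fallback" }} {{ coalesce "" 0 "first-non-empty" }} {{ ternary "yes" "no" true }}`
	tpl := template.Must(template.New("defaults").Funcs(sprig.TxtFuncMap()).Parse(src))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```
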
diff --git a/vendor/github.com/Masterminds/sprig/dict.go b/vendor/github.com/Masterminds/sprig/dict.go
new file mode 100644
index 0000000..738405b
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/dict.go
@@ -0,0 +1,119 @@
+package sprig
+
+import (
+ "github.com/imdario/mergo"
+ "github.com/mitchellh/copystructure"
+)
+
+func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} {
+ d[key] = value
+ return d
+}
+
+func unset(d map[string]interface{}, key string) map[string]interface{} {
+ delete(d, key)
+ return d
+}
+
+func hasKey(d map[string]interface{}, key string) bool {
+ _, ok := d[key]
+ return ok
+}
+
+func pluck(key string, d ...map[string]interface{}) []interface{} {
+ res := []interface{}{}
+ for _, dict := range d {
+ if val, ok := dict[key]; ok {
+ res = append(res, val)
+ }
+ }
+ return res
+}
+
+func keys(dicts ...map[string]interface{}) []string {
+ k := []string{}
+ for _, dict := range dicts {
+ for key := range dict {
+ k = append(k, key)
+ }
+ }
+ return k
+}
+
+func pick(dict map[string]interface{}, keys ...string) map[string]interface{} {
+ res := map[string]interface{}{}
+ for _, k := range keys {
+ if v, ok := dict[k]; ok {
+ res[k] = v
+ }
+ }
+ return res
+}
+
+func omit(dict map[string]interface{}, keys ...string) map[string]interface{} {
+ res := map[string]interface{}{}
+
+ omit := make(map[string]bool, len(keys))
+ for _, k := range keys {
+ omit[k] = true
+ }
+
+ for k, v := range dict {
+ if _, ok := omit[k]; !ok {
+ res[k] = v
+ }
+ }
+ return res
+}
+
+func dict(v ...interface{}) map[string]interface{} {
+ dict := map[string]interface{}{}
+ lenv := len(v)
+ for i := 0; i < lenv; i += 2 {
+ key := strval(v[i])
+ if i+1 >= lenv {
+ dict[key] = ""
+ continue
+ }
+ dict[key] = v[i+1]
+ }
+ return dict
+}
+
+func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} {
+ for _, src := range srcs {
+ if err := mergo.Merge(&dst, src); err != nil {
+ // Swallow errors inside of a template.
+ return ""
+ }
+ }
+ return dst
+}
+
+func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} {
+ for _, src := range srcs {
+ if err := mergo.MergeWithOverwrite(&dst, src); err != nil {
+ // Swallow errors inside of a template.
+ return ""
+ }
+ }
+ return dst
+}
+
+func values(dict map[string]interface{}) []interface{} {
+ values := []interface{}{}
+ for _, value := range dict {
+ values = append(values, value)
+ }
+
+ return values
+}
+
+func deepCopy(i interface{}) interface{} {
+ c, err := copystructure.Copy(i)
+ if err != nil {
+ panic("deepCopy error: " + err.Error())
+ }
+
+ return c
+}
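
A short sketch of the dict helpers (an editorial example, not part of the patch): `dict` builds a map from alternating keys and values, and `merge` keeps existing keys in the destination while filling in missing ones.

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// Prints: [name replicas] true {"a":1,"b":2}
	const src = `{{ $d := dict "name" "app" "replicas" 3 -}}
{{ keys $d | sortAlpha }} {{ hasKey $d "replicas" }} {{ merge (dict "a" 1) (dict "a" 2 "b" 2) | toJson }}`
	tpl := template.Must(template.New("dicts").Funcs(sprig.TxtFuncMap()).Parse(src))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```
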
diff --git a/vendor/github.com/Masterminds/sprig/doc.go b/vendor/github.com/Masterminds/sprig/doc.go
new file mode 100644
index 0000000..8f8f1d7
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/doc.go
@@ -0,0 +1,19 @@
+/*
+Sprig: Template functions for Go.
+
+This package contains a number of utility functions for working with data
+inside of Go `html/template` and `text/template` files.
+
+To add these functions, use the `template.Funcs()` method:
+
+ t := templates.New("foo").Funcs(sprig.FuncMap())
+
+Note that you should add the function map before you parse any template files.
+
+ In several cases, Sprig reverses the order of arguments from the way they
+ appear in the standard library. This is to make it easier to pipe
+ arguments into functions.
+
+See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions.
+*/
+package sprig
diff --git a/vendor/github.com/Masterminds/sprig/functions.go b/vendor/github.com/Masterminds/sprig/functions.go
new file mode 100644
index 0000000..7b5b0af
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/functions.go
@@ -0,0 +1,306 @@
+package sprig
+
+import (
+ "errors"
+ "html/template"
+ "os"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ ttemplate "text/template"
+ "time"
+
+ util "github.com/Masterminds/goutils"
+ "github.com/huandu/xstrings"
+)
+
+// Produce the function map.
+//
+// Use this to pass the functions into the template engine:
+//
+// tpl := template.New("foo").Funcs(sprig.FuncMap())
+//
+func FuncMap() template.FuncMap {
+ return HtmlFuncMap()
+}
+
+// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
+func HermeticTxtFuncMap() ttemplate.FuncMap {
+ r := TxtFuncMap()
+ for _, name := range nonhermeticFunctions {
+ delete(r, name)
+ }
+ return r
+}
+
+// HermeticHtmlFuncMap returns an 'html/template'.FuncMap with only repeatable functions.
+func HermeticHtmlFuncMap() template.FuncMap {
+ r := HtmlFuncMap()
+ for _, name := range nonhermeticFunctions {
+ delete(r, name)
+ }
+ return r
+}
+
+// TxtFuncMap returns a 'text/template'.FuncMap
+func TxtFuncMap() ttemplate.FuncMap {
+ return ttemplate.FuncMap(GenericFuncMap())
+}
+
+// HtmlFuncMap returns an 'html/template'.FuncMap
+func HtmlFuncMap() template.FuncMap {
+ return template.FuncMap(GenericFuncMap())
+}
+
+// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}.
+func GenericFuncMap() map[string]interface{} {
+ gfm := make(map[string]interface{}, len(genericMap))
+ for k, v := range genericMap {
+ gfm[k] = v
+ }
+ return gfm
+}
+
+// These functions are not guaranteed to evaluate to the same result for a given input, because they
+// refer to the environment or global state.
+var nonhermeticFunctions = []string{
+ // Date functions
+ "date",
+ "date_in_zone",
+ "date_modify",
+ "now",
+ "htmlDate",
+ "htmlDateInZone",
+ "dateInZone",
+ "dateModify",
+
+ // Strings
+ "randAlphaNum",
+ "randAlpha",
+ "randAscii",
+ "randNumeric",
+ "uuidv4",
+
+ // OS
+ "env",
+ "expandenv",
+
+ // Network
+ "getHostByName",
+}
+
+var genericMap = map[string]interface{}{
+ "hello": func() string { return "Hello!" },
+
+ // Date functions
+ "date": date,
+ "date_in_zone": dateInZone,
+ "date_modify": dateModify,
+ "now": func() time.Time { return time.Now() },
+ "htmlDate": htmlDate,
+ "htmlDateInZone": htmlDateInZone,
+ "dateInZone": dateInZone,
+ "dateModify": dateModify,
+ "ago": dateAgo,
+ "toDate": toDate,
+ "unixEpoch": unixEpoch,
+
+ // Strings
+ "abbrev": abbrev,
+ "abbrevboth": abbrevboth,
+ "trunc": trunc,
+ "trim": strings.TrimSpace,
+ "upper": strings.ToUpper,
+ "lower": strings.ToLower,
+ "title": strings.Title,
+ "untitle": untitle,
+ "substr": substring,
+ // Switch order so that "foo" | repeat 5
+ "repeat": func(count int, str string) string { return strings.Repeat(str, count) },
+ // Deprecated: Use trimAll.
+ "trimall": func(a, b string) string { return strings.Trim(b, a) },
+ // Switch order so that "$foo" | trimall "$"
+ "trimAll": func(a, b string) string { return strings.Trim(b, a) },
+ "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) },
+ "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) },
+ "nospace": util.DeleteWhiteSpace,
+ "initials": initials,
+ "randAlphaNum": randAlphaNumeric,
+ "randAlpha": randAlpha,
+ "randAscii": randAscii,
+ "randNumeric": randNumeric,
+ "swapcase": util.SwapCase,
+ "shuffle": xstrings.Shuffle,
+ "snakecase": xstrings.ToSnakeCase,
+ "camelcase": xstrings.ToCamelCase,
+ "kebabcase": xstrings.ToKebabCase,
+ "wrap": func(l int, s string) string { return util.Wrap(s, l) },
+ "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) },
+ // Switch order so that "foobar" | contains "foo"
+ "contains": func(substr string, str string) bool { return strings.Contains(str, substr) },
+ "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) },
+ "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) },
+ "quote": quote,
+ "squote": squote,
+ "cat": cat,
+ "indent": indent,
+ "nindent": nindent,
+ "replace": replace,
+ "plural": plural,
+ "sha1sum": sha1sum,
+ "sha256sum": sha256sum,
+ "adler32sum": adler32sum,
+ "toString": strval,
+
+ // Wrap Atoi to stop errors.
+ "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i },
+ "int64": toInt64,
+ "int": toInt,
+ "float64": toFloat64,
+ "toDecimal": toDecimal,
+
+ //"gt": func(a, b int) bool {return a > b},
+ //"gte": func(a, b int) bool {return a >= b},
+ //"lt": func(a, b int) bool {return a < b},
+ //"lte": func(a, b int) bool {return a <= b},
+
+ // split "/" foo/bar returns map[int]string{0: foo, 1: bar}
+ "split": split,
+ "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) },
+ // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu}
+ "splitn": splitn,
+ "toStrings": strslice,
+
+ "until": until,
+ "untilStep": untilStep,
+
+ // VERY basic arithmetic.
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 },
+ "add": func(i ...interface{}) int64 {
+ var a int64 = 0
+ for _, b := range i {
+ a += toInt64(b)
+ }
+ return a
+ },
+ "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) },
+ "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) },
+ "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) },
+ "mul": func(a interface{}, v ...interface{}) int64 {
+ val := toInt64(a)
+ for _, b := range v {
+ val = val * toInt64(b)
+ }
+ return val
+ },
+ "biggest": max,
+ "max": max,
+ "min": min,
+ "ceil": ceil,
+ "floor": floor,
+ "round": round,
+
+ // string slices. Note that we reverse the order b/c that's better
+ // for template processing.
+ "join": join,
+ "sortAlpha": sortAlpha,
+
+ // Defaults
+ "default": dfault,
+ "empty": empty,
+ "coalesce": coalesce,
+ "compact": compact,
+ "deepCopy": deepCopy,
+ "toJson": toJson,
+ "toPrettyJson": toPrettyJson,
+ "ternary": ternary,
+
+ // Reflection
+ "typeOf": typeOf,
+ "typeIs": typeIs,
+ "typeIsLike": typeIsLike,
+ "kindOf": kindOf,
+ "kindIs": kindIs,
+ "deepEqual": reflect.DeepEqual,
+
+ // OS:
+ "env": func(s string) string { return os.Getenv(s) },
+ "expandenv": func(s string) string { return os.ExpandEnv(s) },
+
+ // Network:
+ "getHostByName": getHostByName,
+
+ // File Paths:
+ "base": path.Base,
+ "dir": path.Dir,
+ "clean": path.Clean,
+ "ext": path.Ext,
+ "isAbs": path.IsAbs,
+
+ // Encoding:
+ "b64enc": base64encode,
+ "b64dec": base64decode,
+ "b32enc": base32encode,
+ "b32dec": base32decode,
+
+ // Data Structures:
+ "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable.
+ "list": list,
+ "dict": dict,
+ "set": set,
+ "unset": unset,
+ "hasKey": hasKey,
+ "pluck": pluck,
+ "keys": keys,
+ "pick": pick,
+ "omit": omit,
+ "merge": merge,
+ "mergeOverwrite": mergeOverwrite,
+ "values": values,
+
+ "append": push, "push": push,
+ "prepend": prepend,
+ "first": first,
+ "rest": rest,
+ "last": last,
+ "initial": initial,
+ "reverse": reverse,
+ "uniq": uniq,
+ "without": without,
+ "has": has,
+ "slice": slice,
+ "concat": concat,
+
+ // Crypto:
+ "genPrivateKey": generatePrivateKey,
+ "derivePassword": derivePassword,
+ "buildCustomCert": buildCustomCertificate,
+ "genCA": generateCertificateAuthority,
+ "genSelfSignedCert": generateSelfSignedCertificate,
+ "genSignedCert": generateSignedCertificate,
+ "encryptAES": encryptAES,
+ "decryptAES": decryptAES,
+
+ // UUIDs:
+ "uuidv4": uuidv4,
+
+ // SemVer:
+ "semver": semver,
+ "semverCompare": semverCompare,
+
+ // Flow Control:
+ "fail": func(msg string) (string, error) { return "", errors.New(msg) },
+
+ // Regex
+ "regexMatch": regexMatch,
+ "regexFindAll": regexFindAll,
+ "regexFind": regexFind,
+ "regexReplaceAll": regexReplaceAll,
+ "regexReplaceAllLiteral": regexReplaceAllLiteral,
+ "regexSplit": regexSplit,
+
+ // URLs:
+ "urlParse": urlParse,
+ "urlJoin": urlJoin,
+}
diff --git a/vendor/github.com/Masterminds/sprig/glide.yaml b/vendor/github.com/Masterminds/sprig/glide.yaml
new file mode 100644
index 0000000..f317d2b
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/glide.yaml
@@ -0,0 +1,19 @@
+package: github.com/Masterminds/sprig
+import:
+- package: github.com/Masterminds/goutils
+ version: ^1.0.0
+- package: github.com/google/uuid
+ version: ^1.0.0
+- package: golang.org/x/crypto
+ subpackages:
+ - scrypt
+- package: github.com/Masterminds/semver
+ version: ^v1.2.2
+- package: github.com/stretchr/testify
+ version: ^v1.2.2
+- package: github.com/imdario/mergo
+ version: ~0.3.7
+- package: github.com/huandu/xstrings
+ version: ^1.2
+- package: github.com/mitchellh/copystructure
+ version: ^1.0.0
diff --git a/vendor/github.com/Masterminds/sprig/list.go b/vendor/github.com/Masterminds/sprig/list.go
new file mode 100644
index 0000000..c0381bb
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/list.go
@@ -0,0 +1,311 @@
+package sprig
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// Reflection is used in these functions so that slices and arrays of strings,
+// ints, and other types not implementing []interface{} can be worked with.
+// For example, this is useful if you need to work on the output of regexes.
+
+func list(v ...interface{}) []interface{} {
+ return v
+}
+
+func push(list interface{}, v interface{}) []interface{} {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ nl[i] = l2.Index(i).Interface()
+ }
+
+ return append(nl, v)
+
+ default:
+ panic(fmt.Sprintf("Cannot push on type %s", tp))
+ }
+}
+
+func prepend(list interface{}, v interface{}) []interface{} {
+ //return append([]interface{}{v}, list...)
+
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ nl[i] = l2.Index(i).Interface()
+ }
+
+ return append([]interface{}{v}, nl...)
+
+ default:
+ panic(fmt.Sprintf("Cannot prepend on type %s", tp))
+ }
+}
+
+func last(list interface{}) interface{} {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil
+ }
+
+ return l2.Index(l - 1).Interface()
+ default:
+ panic(fmt.Sprintf("Cannot find last on type %s", tp))
+ }
+}
+
+func first(list interface{}) interface{} {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil
+ }
+
+ return l2.Index(0).Interface()
+ default:
+ panic(fmt.Sprintf("Cannot find first on type %s", tp))
+ }
+}
+
+func rest(list interface{}) []interface{} {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil
+ }
+
+ nl := make([]interface{}, l-1)
+ for i := 1; i < l; i++ {
+ nl[i-1] = l2.Index(i).Interface()
+ }
+
+ return nl
+ default:
+ panic(fmt.Sprintf("Cannot find rest on type %s", tp))
+ }
+}
+
+func initial(list interface{}) []interface{} {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil
+ }
+
+ nl := make([]interface{}, l-1)
+ for i := 0; i < l-1; i++ {
+ nl[i] = l2.Index(i).Interface()
+ }
+
+ return nl
+ default:
+ panic(fmt.Sprintf("Cannot find initial on type %s", tp))
+ }
+}
+
+func sortAlpha(list interface{}) []string {
+ k := reflect.Indirect(reflect.ValueOf(list)).Kind()
+ switch k {
+ case reflect.Slice, reflect.Array:
+ a := strslice(list)
+ s := sort.StringSlice(a)
+ s.Sort()
+ return s
+ }
+ return []string{strval(list)}
+}
+
+func reverse(v interface{}) []interface{} {
+ tp := reflect.TypeOf(v).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(v)
+
+ l := l2.Len()
+ // We do not reverse in place because the incoming array should not be altered.
+ nl := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ nl[l-i-1] = l2.Index(i).Interface()
+ }
+
+ return nl
+ default:
+ panic(fmt.Sprintf("Cannot find reverse on type %s", tp))
+ }
+}
+
+func compact(list interface{}) []interface{} {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !empty(item) {
+ nl = append(nl, item)
+ }
+ }
+
+ return nl
+ default:
+ panic(fmt.Sprintf("Cannot compact on type %s", tp))
+ }
+}
+
+func uniq(list interface{}) []interface{} {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ dest := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !inList(dest, item) {
+ dest = append(dest, item)
+ }
+ }
+
+ return dest
+ default:
+ panic(fmt.Sprintf("Cannot find uniq on type %s", tp))
+ }
+}
+
+func inList(haystack []interface{}, needle interface{}) bool {
+ for _, h := range haystack {
+ if reflect.DeepEqual(needle, h) {
+ return true
+ }
+ }
+ return false
+}
+
+func without(list interface{}, omit ...interface{}) []interface{} {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ res := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !inList(omit, item) {
+ res = append(res, item)
+ }
+ }
+
+ return res
+ default:
+ panic(fmt.Sprintf("Cannot find without on type %s", tp))
+ }
+}
+
+func has(needle interface{}, haystack interface{}) bool {
+ if haystack == nil {
+ return false
+ }
+ tp := reflect.TypeOf(haystack).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(haystack)
+ var item interface{}
+ l := l2.Len()
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if reflect.DeepEqual(needle, item) {
+ return true
+ }
+ }
+
+ return false
+ default:
+ panic(fmt.Sprintf("Cannot find has on type %s", tp))
+ }
+}
+
+// $list := [1, 2, 3, 4, 5]
+// slice $list -> list[0:5] = list[:]
+// slice $list 0 3 -> list[0:3] = list[:3]
+// slice $list 3 5 -> list[3:5]
+// slice $list 3 -> list[3:5] = list[3:]
+func slice(list interface{}, indices ...interface{}) interface{} {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil
+ }
+
+ var start, end int
+ if len(indices) > 0 {
+ start = toInt(indices[0])
+ }
+ if len(indices) < 2 {
+ end = l
+ } else {
+ end = toInt(indices[1])
+ }
+
+ return l2.Slice(start, end).Interface()
+ default:
+ panic(fmt.Sprintf("list should be type of slice or array but %s", tp))
+ }
+}
+
+func concat(lists ...interface{}) interface{} {
+ var res []interface{}
+ for _, list := range lists {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+ for i := 0; i < l2.Len(); i++ {
+ res = append(res, l2.Index(i).Interface())
+ }
+ default:
+ panic(fmt.Sprintf("Cannot concat type %s as list", tp))
+ }
+ }
+ return res
+}
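
A compact sketch of the list helpers (illustrative only); as the note at the top of this file says, reflection lets them accept plain slices as well as the `[]interface{}` values produced by `list`.

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// Prints: [4 3 2 1] [b c] [1 3] [1 2 3]
	const src = `{{ list 1 2 3 4 | reverse }} {{ slice (list "a" "b" "c" "d") 1 3 }} {{ without (list 1 2 3 2) 2 }} {{ concat (list 1 2) (list 3) }}`
	tpl := template.Must(template.New("lists").Funcs(sprig.TxtFuncMap()).Parse(src))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```
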
diff --git a/vendor/github.com/Masterminds/sprig/network.go b/vendor/github.com/Masterminds/sprig/network.go
new file mode 100644
index 0000000..d786cc7
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/network.go
@@ -0,0 +1,12 @@
+package sprig
+
+import (
+ "math/rand"
+ "net"
+)
+
+func getHostByName(name string) string {
+ addrs, _ := net.LookupHost(name)
+ // TODO: add error handling when release v3 comes out
+ return addrs[rand.Intn(len(addrs))]
+}
diff --git a/vendor/github.com/Masterminds/sprig/numeric.go b/vendor/github.com/Masterminds/sprig/numeric.go
new file mode 100644
index 0000000..f4af4af
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/numeric.go
@@ -0,0 +1,169 @@
+package sprig
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+)
+
+// toFloat64 converts 64-bit floats
+func toFloat64(v interface{}) float64 {
+ if str, ok := v.(string); ok {
+ iv, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return 0
+ }
+ return iv
+ }
+
+ val := reflect.Indirect(reflect.ValueOf(v))
+ switch val.Kind() {
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return float64(val.Int())
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return float64(val.Uint())
+ case reflect.Uint, reflect.Uint64:
+ return float64(val.Uint())
+ case reflect.Float32, reflect.Float64:
+ return val.Float()
+ case reflect.Bool:
+ if val.Bool() == true {
+ return 1
+ }
+ return 0
+ default:
+ return 0
+ }
+}
+
+func toInt(v interface{}) int {
+ // It's not optimal, but I don't want to duplicate the toInt64 code.
+ return int(toInt64(v))
+}
+
+// toInt64 converts integer types to 64-bit integers
+func toInt64(v interface{}) int64 {
+ if str, ok := v.(string); ok {
+ iv, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return 0
+ }
+ return iv
+ }
+
+ val := reflect.Indirect(reflect.ValueOf(v))
+ switch val.Kind() {
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return val.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return int64(val.Uint())
+ case reflect.Uint, reflect.Uint64:
+ tv := val.Uint()
+ if tv <= math.MaxInt64 {
+ return int64(tv)
+ }
+ // TODO: What is the sensible thing to do here?
+ return math.MaxInt64
+ case reflect.Float32, reflect.Float64:
+ return int64(val.Float())
+ case reflect.Bool:
+ if val.Bool() == true {
+ return 1
+ }
+ return 0
+ default:
+ return 0
+ }
+}
+
+func max(a interface{}, i ...interface{}) int64 {
+ aa := toInt64(a)
+ for _, b := range i {
+ bb := toInt64(b)
+ if bb > aa {
+ aa = bb
+ }
+ }
+ return aa
+}
+
+func min(a interface{}, i ...interface{}) int64 {
+ aa := toInt64(a)
+ for _, b := range i {
+ bb := toInt64(b)
+ if bb < aa {
+ aa = bb
+ }
+ }
+ return aa
+}
+
+func until(count int) []int {
+ step := 1
+ if count < 0 {
+ step = -1
+ }
+ return untilStep(0, count, step)
+}
+
+func untilStep(start, stop, step int) []int {
+ v := []int{}
+
+ if stop < start {
+ if step >= 0 {
+ return v
+ }
+ for i := start; i > stop; i += step {
+ v = append(v, i)
+ }
+ return v
+ }
+
+ if step <= 0 {
+ return v
+ }
+ for i := start; i < stop; i += step {
+ v = append(v, i)
+ }
+ return v
+}
+
+func floor(a interface{}) float64 {
+ aa := toFloat64(a)
+ return math.Floor(aa)
+}
+
+func ceil(a interface{}) float64 {
+ aa := toFloat64(a)
+ return math.Ceil(aa)
+}
+
+func round(a interface{}, p int, r_opt ...float64) float64 {
+ roundOn := .5
+ if len(r_opt) > 0 {
+ roundOn = r_opt[0]
+ }
+ val := toFloat64(a)
+ places := toFloat64(p)
+
+ var round float64
+ pow := math.Pow(10, places)
+ digit := pow * val
+ _, div := math.Modf(digit)
+ if div >= roundOn {
+ round = math.Ceil(digit)
+ } else {
+ round = math.Floor(digit)
+ }
+ return round / pow
+}
+
+// toDecimal converts a unix octal value (such as a file mode) to decimal
+func toDecimal(v interface{}) int64 {
+ result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
+ if err != nil {
+ return 0
+ }
+ return result
+}
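
A tiny sketch of the arithmetic helpers (not part of the vendored file): `round` takes the number of decimal places, `toDecimal` parses an octal string, and `untilStep` generates a ranged slice.

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// Prints: 3.14 511 [0 3 6 9] 7
	const src = `{{ round 3.14159 2 }} {{ toDecimal "0777" }} {{ untilStep 0 10 3 }} {{ max 1 7 3 }}`
	tpl := template.Must(template.New("numbers").Funcs(sprig.TxtFuncMap()).Parse(src))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```
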
diff --git a/vendor/github.com/Masterminds/sprig/reflect.go b/vendor/github.com/Masterminds/sprig/reflect.go
new file mode 100644
index 0000000..8a65c13
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/reflect.go
@@ -0,0 +1,28 @@
+package sprig
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// typeIs returns true if the src is the type named in target.
+func typeIs(target string, src interface{}) bool {
+ return target == typeOf(src)
+}
+
+func typeIsLike(target string, src interface{}) bool {
+ t := typeOf(src)
+ return target == t || "*"+target == t
+}
+
+func typeOf(src interface{}) string {
+ return fmt.Sprintf("%T", src)
+}
+
+func kindIs(target string, src interface{}) bool {
+ return target == kindOf(src)
+}
+
+func kindOf(src interface{}) string {
+ return reflect.ValueOf(src).Kind().String()
+}
diff --git a/vendor/github.com/Masterminds/sprig/regex.go b/vendor/github.com/Masterminds/sprig/regex.go
new file mode 100644
index 0000000..2016f66
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/regex.go
@@ -0,0 +1,35 @@
+package sprig
+
+import (
+ "regexp"
+)
+
+func regexMatch(regex string, s string) bool {
+ match, _ := regexp.MatchString(regex, s)
+ return match
+}
+
+func regexFindAll(regex string, s string, n int) []string {
+ r := regexp.MustCompile(regex)
+ return r.FindAllString(s, n)
+}
+
+func regexFind(regex string, s string) string {
+ r := regexp.MustCompile(regex)
+ return r.FindString(s)
+}
+
+func regexReplaceAll(regex string, s string, repl string) string {
+ r := regexp.MustCompile(regex)
+ return r.ReplaceAllString(s, repl)
+}
+
+func regexReplaceAllLiteral(regex string, s string, repl string) string {
+ r := regexp.MustCompile(regex)
+ return r.ReplaceAllLiteralString(s, repl)
+}
+
+func regexSplit(regex string, s string, n int) []string {
+ r := regexp.MustCompile(regex)
+ return r.Split(s, n)
+}
diff --git a/vendor/github.com/Masterminds/sprig/semver.go b/vendor/github.com/Masterminds/sprig/semver.go
new file mode 100644
index 0000000..c2bf8a1
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/semver.go
@@ -0,0 +1,23 @@
+package sprig
+
+import (
+ sv2 "github.com/Masterminds/semver"
+)
+
+func semverCompare(constraint, version string) (bool, error) {
+ c, err := sv2.NewConstraint(constraint)
+ if err != nil {
+ return false, err
+ }
+
+ v, err := sv2.NewVersion(version)
+ if err != nil {
+ return false, err
+ }
+
+ return c.Check(v), nil
+}
+
+func semver(version string) (*sv2.Version, error) {
+ return sv2.NewVersion(version)
+}
diff --git a/vendor/github.com/Masterminds/sprig/strings.go b/vendor/github.com/Masterminds/sprig/strings.go
new file mode 100644
index 0000000..943fa3e
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/strings.go
@@ -0,0 +1,233 @@
+package sprig
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ util "github.com/Masterminds/goutils"
+)
+
+func base64encode(v string) string {
+ return base64.StdEncoding.EncodeToString([]byte(v))
+}
+
+func base64decode(v string) string {
+ data, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return err.Error()
+ }
+ return string(data)
+}
+
+func base32encode(v string) string {
+ return base32.StdEncoding.EncodeToString([]byte(v))
+}
+
+func base32decode(v string) string {
+ data, err := base32.StdEncoding.DecodeString(v)
+ if err != nil {
+ return err.Error()
+ }
+ return string(data)
+}
+
+func abbrev(width int, s string) string {
+ if width < 4 {
+ return s
+ }
+ r, _ := util.Abbreviate(s, width)
+ return r
+}
+
+func abbrevboth(left, right int, s string) string {
+ if right < 4 || left > 0 && right < 7 {
+ return s
+ }
+ r, _ := util.AbbreviateFull(s, left, right)
+ return r
+}
+
+func initials(s string) string {
+ // Wrap this just to eliminate the var args, which templates don't do well.
+ return util.Initials(s)
+}
+
+func randAlphaNumeric(count int) string {
+ // It is not possible, it appears, to actually generate an error here.
+ r, _ := util.CryptoRandomAlphaNumeric(count)
+ return r
+}
+
+func randAlpha(count int) string {
+ r, _ := util.CryptoRandomAlphabetic(count)
+ return r
+}
+
+func randAscii(count int) string {
+ r, _ := util.CryptoRandomAscii(count)
+ return r
+}
+
+func randNumeric(count int) string {
+ r, _ := util.CryptoRandomNumeric(count)
+ return r
+}
+
+func untitle(str string) string {
+ return util.Uncapitalize(str)
+}
+
+func quote(str ...interface{}) string {
+ out := make([]string, 0, len(str))
+ for _, s := range str {
+ if s != nil {
+ out = append(out, fmt.Sprintf("%q", strval(s)))
+ }
+ }
+ return strings.Join(out, " ")
+}
+
+func squote(str ...interface{}) string {
+ out := make([]string, 0, len(str))
+ for _, s := range str {
+ if s != nil {
+ out = append(out, fmt.Sprintf("'%v'", s))
+ }
+ }
+ return strings.Join(out, " ")
+}
+
+func cat(v ...interface{}) string {
+ v = removeNilElements(v)
+ r := strings.TrimSpace(strings.Repeat("%v ", len(v)))
+ return fmt.Sprintf(r, v...)
+}
+
+func indent(spaces int, v string) string {
+ pad := strings.Repeat(" ", spaces)
+ return pad + strings.Replace(v, "\n", "\n"+pad, -1)
+}
+
+func nindent(spaces int, v string) string {
+ return "\n" + indent(spaces, v)
+}
+
+func replace(old, new, src string) string {
+ return strings.Replace(src, old, new, -1)
+}
+
+func plural(one, many string, count int) string {
+ if count == 1 {
+ return one
+ }
+ return many
+}
+
+func strslice(v interface{}) []string {
+ switch v := v.(type) {
+ case []string:
+ return v
+ case []interface{}:
+ b := make([]string, 0, len(v))
+ for _, s := range v {
+ if s != nil {
+ b = append(b, strval(s))
+ }
+ }
+ return b
+ default:
+ val := reflect.ValueOf(v)
+ switch val.Kind() {
+ case reflect.Array, reflect.Slice:
+ l := val.Len()
+ b := make([]string, 0, l)
+ for i := 0; i < l; i++ {
+ value := val.Index(i).Interface()
+ if value != nil {
+ b = append(b, strval(value))
+ }
+ }
+ return b
+ default:
+ if v == nil {
+ return []string{}
+ } else {
+ return []string{strval(v)}
+ }
+ }
+ }
+}
+
+func removeNilElements(v []interface{}) []interface{} {
+ newSlice := make([]interface{}, 0, len(v))
+ for _, i := range v {
+ if i != nil {
+ newSlice = append(newSlice, i)
+ }
+ }
+ return newSlice
+}
+
+func strval(v interface{}) string {
+ switch v := v.(type) {
+ case string:
+ return v
+ case []byte:
+ return string(v)
+ case error:
+ return v.Error()
+ case fmt.Stringer:
+ return v.String()
+ default:
+ return fmt.Sprintf("%v", v)
+ }
+}
+
+func trunc(c int, s string) string {
+ if len(s) <= c {
+ return s
+ }
+ return s[0:c]
+}
+
+func join(sep string, v interface{}) string {
+ return strings.Join(strslice(v), sep)
+}
+
+func split(sep, orig string) map[string]string {
+ parts := strings.Split(orig, sep)
+ res := make(map[string]string, len(parts))
+ for i, v := range parts {
+ res["_"+strconv.Itoa(i)] = v
+ }
+ return res
+}
+
+func splitn(sep string, n int, orig string) map[string]string {
+ parts := strings.SplitN(orig, sep, n)
+ res := make(map[string]string, len(parts))
+ for i, v := range parts {
+ res["_"+strconv.Itoa(i)] = v
+ }
+ return res
+}
+
+// substring creates a substring of the given string.
+//
+// If start is < 0, this calls string[:end].
+//
+// If start is >= 0 and end is < 0 or greater than the length of s, this calls string[start:].
+//
+// Otherwise, this calls string[start:end].
+func substring(start, end int, s string) string {
+ if start < 0 {
+ return s[:end]
+ }
+ if end < 0 || end > len(s) {
+ return s[start:]
+ }
+ return s[start:end]
+}
diff --git a/vendor/github.com/Masterminds/sprig/url.go b/vendor/github.com/Masterminds/sprig/url.go
new file mode 100644
index 0000000..5f22d80
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/url.go
@@ -0,0 +1,66 @@
+package sprig
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+)
+
+func dictGetOrEmpty(dict map[string]interface{}, key string) string {
+	value, ok := dict[key]
+	if !ok {
+ return ""
+ }
+ tp := reflect.TypeOf(value).Kind()
+ if tp != reflect.String {
+ panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String()))
+ }
+ return reflect.ValueOf(value).String()
+}
+
+// urlParse parses the given URL and returns its components as a dict.
+func urlParse(v string) map[string]interface{} {
+ dict := map[string]interface{}{}
+ parsedUrl, err := url.Parse(v)
+ if err != nil {
+ panic(fmt.Sprintf("unable to parse url: %s", err))
+ }
+ dict["scheme"] = parsedUrl.Scheme
+ dict["host"] = parsedUrl.Host
+ dict["hostname"] = parsedUrl.Hostname()
+ dict["path"] = parsedUrl.Path
+ dict["query"] = parsedUrl.RawQuery
+ dict["opaque"] = parsedUrl.Opaque
+ dict["fragment"] = parsedUrl.Fragment
+ if parsedUrl.User != nil {
+ dict["userinfo"] = parsedUrl.User.String()
+ } else {
+ dict["userinfo"] = ""
+ }
+
+ return dict
+}
+
+// urlJoin joins the given dict of URL components back into a URL string.
+func urlJoin(d map[string]interface{}) string {
+ resUrl := url.URL{
+ Scheme: dictGetOrEmpty(d, "scheme"),
+ Host: dictGetOrEmpty(d, "host"),
+ Path: dictGetOrEmpty(d, "path"),
+ RawQuery: dictGetOrEmpty(d, "query"),
+ Opaque: dictGetOrEmpty(d, "opaque"),
+ Fragment: dictGetOrEmpty(d, "fragment"),
+	}
+ userinfo := dictGetOrEmpty(d, "userinfo")
+	var user *url.Userinfo
+ if userinfo != "" {
+ tempUrl, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo))
+ if err != nil {
+ panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err))
+ }
+ user = tempUrl.User
+ }
+
+ resUrl.User = user
+ return resUrl.String()
+}
diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore
new file mode 100644
index 0000000..748e4c8
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/.gitignore
@@ -0,0 +1,5 @@
+*.sublime-*
+.DS_Store
+*.swp
+*.swo
+tags
diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml
new file mode 100644
index 0000000..cf31e6a
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+ - 1.4.x
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - "1.10.x"
+ - "1.11.x"
+ - tip
diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE
new file mode 100644
index 0000000..4b9986d
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/LICENSE
@@ -0,0 +1,12 @@
+Copyright (c) 2012, Martin Angers
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md
new file mode 100644
index 0000000..07de0c4
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/README.md
@@ -0,0 +1,188 @@
+# Purell
+
+Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
+
+Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
+
+[Build Status](http://travis-ci.org/PuerkitoBio/purell)
+
+## Install
+
+`go get github.com/PuerkitoBio/purell`
+
+## Changelog
+
+* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor).
+* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
+* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
+* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
+* **v0.2.0** : Add benchmarks, Attempt IDN support.
+* **v0.1.0** : Initial release.
+
+## Examples
+
+From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
+
+```go
+package purell
+
+import (
+ "fmt"
+ "net/url"
+)
+
+func ExampleNormalizeURLString() {
+ if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
+ FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
+ panic(err)
+ } else {
+ fmt.Print(normalized)
+ }
+ // Output: http://somewebsite.com:80/Amazing%3F/url/
+}
+
+func ExampleMustNormalizeURLString() {
+ normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
+ FlagsUnsafeGreedy)
+ fmt.Print(normalized)
+
+ // Output: http://somewebsite.com/Amazing%FA/url
+}
+
+func ExampleNormalizeURL() {
+ if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
+ panic(err)
+ } else {
+ normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
+ fmt.Print(normalized)
+ }
+
+ // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
+}
+```
+
+## API
+
+As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
+
+```go
+const (
+ // Safe normalizations
+ FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
+ FlagLowercaseHost // http://HOST -> http://host
+ FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
+ FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
+ FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
+ FlagRemoveDefaultPort // http://host:80 -> http://host
+ FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
+
+ // Usually safe normalizations
+ FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
+ FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
+ FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
+
+ // Unsafe normalizations
+ FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
+ FlagRemoveFragment // http://host/path#fragment -> http://host/path
+ FlagForceHTTP // https://host -> http://host
+ FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
+ FlagRemoveWWW // http://www.host/ -> http://host/
+ FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
+ FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
+
+ // Normalizations not in the wikipedia article, required to cover tests cases
+ // submitted by jehiah
+ FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
+ FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
+ FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
+ FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
+ FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
+
+ // Convenience set of safe normalizations
+ FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
+
+ // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
+ // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
+
+ // Convenience set of usually safe normalizations (includes FlagsSafe)
+ FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
+ FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
+
+ // Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
+ FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
+ FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
+
+ // Convenience set of all available flags
+ FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+ FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+)
+```
+
+For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
+
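+For instance, a custom set could be assembled like this (a minimal sketch; the flag names are the ones defined above, while the example URL and the output shown in the comment are illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/PuerkitoBio/purell"
+)
+
+func main() {
+	// Start from the safe set, add query sorting, and drop default-port
+	// removal using the AND NOT (&^) operator.
+	custom := (purell.FlagsSafe | purell.FlagSortQuery) &^ purell.FlagRemoveDefaultPort
+
+	normalized, err := purell.NormalizeURLString("HTTP://Example.com:80/path?b=2&a=1", custom)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(normalized) // e.g. http://example.com:80/path?a=1&b=2
+}
+```
+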
+The [full godoc reference is available on gopkgdoc][godoc].
+
+Some things to note:
+
+* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
+
+* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
+ - %24 -> $
+ - %26 -> &
+ - %2B-%3B -> +,-./0123456789:;
+ - %3D -> =
+ - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ - %5F -> _
+ - %61-%7A -> abcdefghijklmnopqrstuvwxyz
+ - %7E -> ~
+
+
+* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
+
+* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
+
+* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
+
+### Safe vs Usually Safe vs Unsafe
+
+Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
+
+Consider the following URL:
+
+`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
+
+Normalizing with the `FlagsSafe` gives:
+
+`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
+
+With the `FlagsUsuallySafeGreedy`:
+
+`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
+
+And with `FlagsUnsafeGreedy`:
+
+`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
+
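+The same comparison can be reproduced in code (a short sketch; it only uses `MustNormalizeURLString` and the convenience flag sets shown earlier):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/PuerkitoBio/purell"
+)
+
+func main() {
+	const raw = "HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid"
+
+	// Print the normalized URL for each level of risk.
+	for _, f := range []purell.NormalizationFlags{
+		purell.FlagsSafe,
+		purell.FlagsUsuallySafeGreedy,
+		purell.FlagsUnsafeGreedy,
+	} {
+		fmt.Println(purell.MustNormalizeURLString(raw, f))
+	}
+}
+```
+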
+## TODOs
+
+* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
+
+## Thanks / Contributions
+
+@rogpeppe
+@jehiah
+@opennota
+@pchristopher1275
+@zenovich
+@beeker1121
+
+## License
+
+The [BSD 3-Clause license][bsd].
+
+[bsd]: http://opensource.org/licenses/BSD-3-Clause
+[wiki]: http://en.wikipedia.org/wiki/URL_normalization
+[rfc]: http://tools.ietf.org/html/rfc3986#section-6
+[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
+[pr5]: https://github.com/PuerkitoBio/purell/pull/5
+[iss7]: https://github.com/PuerkitoBio/purell/issues/7
diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go
new file mode 100644
index 0000000..6d0fc19
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/purell.go
@@ -0,0 +1,379 @@
+/*
+Package purell offers URL normalization as described on the wikipedia page:
+http://en.wikipedia.org/wiki/URL_normalization
+*/
+package purell
+
+import (
+ "bytes"
+ "fmt"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/PuerkitoBio/urlesc"
+ "golang.org/x/net/idna"
+ "golang.org/x/text/unicode/norm"
+ "golang.org/x/text/width"
+)
+
+// A set of normalization flags determines how a URL will
+// be normalized.
+type NormalizationFlags uint
+
+const (
+ // Safe normalizations
+ FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
+ FlagLowercaseHost // http://HOST -> http://host
+ FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
+ FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
+ FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
+ FlagRemoveDefaultPort // http://host:80 -> http://host
+ FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
+
+ // Usually safe normalizations
+ FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
+ FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
+ FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
+
+ // Unsafe normalizations
+ FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
+ FlagRemoveFragment // http://host/path#fragment -> http://host/path
+ FlagForceHTTP // https://host -> http://host
+ FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
+ FlagRemoveWWW // http://www.host/ -> http://host/
+ FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
+ FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
+
+ // Normalizations not in the wikipedia article, required to cover tests cases
+ // submitted by jehiah
+ FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
+ FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
+ FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
+ FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
+ FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
+
+ // Convenience set of safe normalizations
+ FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
+
+ // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
+ // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
+
+ // Convenience set of usually safe normalizations (includes FlagsSafe)
+ FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
+ FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
+
+ // Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
+ FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
+ FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
+
+ // Convenience set of all available flags
+ FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+ FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+)
+
+const (
+ defaultHttpPort = ":80"
+ defaultHttpsPort = ":443"
+)
+
+// Regular expressions used by the normalizations
+var rxPort = regexp.MustCompile(`(:\d+)/?$`)
+var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
+var rxDupSlashes = regexp.MustCompile(`/{2,}`)
+var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
+var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
+var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
+var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
+var rxEmptyPort = regexp.MustCompile(`:+$`)
+
+// Map of flags to implementation function.
+// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
+// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
+
+// Since maps have undefined traversing order, make a slice of ordered keys
+var flagsOrder = []NormalizationFlags{
+ FlagLowercaseScheme,
+ FlagLowercaseHost,
+ FlagRemoveDefaultPort,
+ FlagRemoveDirectoryIndex,
+ FlagRemoveDotSegments,
+ FlagRemoveFragment,
+ FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
+ FlagRemoveDuplicateSlashes,
+ FlagRemoveWWW,
+ FlagAddWWW,
+ FlagSortQuery,
+ FlagDecodeDWORDHost,
+ FlagDecodeOctalHost,
+ FlagDecodeHexHost,
+ FlagRemoveUnnecessaryHostDots,
+ FlagRemoveEmptyPortSeparator,
+ FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
+ FlagAddTrailingSlash,
+}
+
+// ... and then the map, where order is unimportant
+var flags = map[NormalizationFlags]func(*url.URL){
+ FlagLowercaseScheme: lowercaseScheme,
+ FlagLowercaseHost: lowercaseHost,
+ FlagRemoveDefaultPort: removeDefaultPort,
+ FlagRemoveDirectoryIndex: removeDirectoryIndex,
+ FlagRemoveDotSegments: removeDotSegments,
+ FlagRemoveFragment: removeFragment,
+ FlagForceHTTP: forceHTTP,
+ FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
+ FlagRemoveWWW: removeWWW,
+ FlagAddWWW: addWWW,
+ FlagSortQuery: sortQuery,
+ FlagDecodeDWORDHost: decodeDWORDHost,
+ FlagDecodeOctalHost: decodeOctalHost,
+ FlagDecodeHexHost: decodeHexHost,
+ FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
+ FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
+ FlagRemoveTrailingSlash: removeTrailingSlash,
+ FlagAddTrailingSlash: addTrailingSlash,
+}
+
+// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
+// It takes an URL string as input, as well as the normalization flags.
+func MustNormalizeURLString(u string, f NormalizationFlags) string {
+ result, e := NormalizeURLString(u, f)
+ if e != nil {
+ panic(e)
+ }
+ return result
+}
+
+// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
+// It takes an URL string as input, as well as the normalization flags.
+func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
+ parsed, err := url.Parse(u)
+ if err != nil {
+ return "", err
+ }
+
+ if f&FlagLowercaseHost == FlagLowercaseHost {
+ parsed.Host = strings.ToLower(parsed.Host)
+ }
+
+ // The idna package doesn't fully conform to RFC 5895
+ // (https://tools.ietf.org/html/rfc5895), so we do it here.
+ // Taken from Go 1.8 cycle source, courtesy of bradfitz.
+ // TODO: Remove when (if?) idna package conforms to RFC 5895.
+ parsed.Host = width.Fold.String(parsed.Host)
+ parsed.Host = norm.NFC.String(parsed.Host)
+ if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
+ return "", err
+ }
+
+ return NormalizeURL(parsed, f), nil
+}
+
+// NormalizeURL returns the normalized string.
+// It takes a parsed URL object as input, as well as the normalization flags.
+func NormalizeURL(u *url.URL, f NormalizationFlags) string {
+ for _, k := range flagsOrder {
+ if f&k == k {
+ flags[k](u)
+ }
+ }
+ return urlesc.Escape(u)
+}
+
+func lowercaseScheme(u *url.URL) {
+ if len(u.Scheme) > 0 {
+ u.Scheme = strings.ToLower(u.Scheme)
+ }
+}
+
+func lowercaseHost(u *url.URL) {
+ if len(u.Host) > 0 {
+ u.Host = strings.ToLower(u.Host)
+ }
+}
+
+func removeDefaultPort(u *url.URL) {
+ if len(u.Host) > 0 {
+ scheme := strings.ToLower(u.Scheme)
+ u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
+ if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
+ return ""
+ }
+ return val
+ })
+ }
+}
+
+func removeTrailingSlash(u *url.URL) {
+ if l := len(u.Path); l > 0 {
+ if strings.HasSuffix(u.Path, "/") {
+ u.Path = u.Path[:l-1]
+ }
+ } else if l = len(u.Host); l > 0 {
+ if strings.HasSuffix(u.Host, "/") {
+ u.Host = u.Host[:l-1]
+ }
+ }
+}
+
+func addTrailingSlash(u *url.URL) {
+ if l := len(u.Path); l > 0 {
+ if !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ }
+ } else if l = len(u.Host); l > 0 {
+ if !strings.HasSuffix(u.Host, "/") {
+ u.Host += "/"
+ }
+ }
+}
+
+func removeDotSegments(u *url.URL) {
+ if len(u.Path) > 0 {
+ var dotFree []string
+ var lastIsDot bool
+
+ sections := strings.Split(u.Path, "/")
+ for _, s := range sections {
+ if s == ".." {
+ if len(dotFree) > 0 {
+ dotFree = dotFree[:len(dotFree)-1]
+ }
+ } else if s != "." {
+ dotFree = append(dotFree, s)
+ }
+ lastIsDot = (s == "." || s == "..")
+ }
+ // Special case if host does not end with / and new path does not begin with /
+ u.Path = strings.Join(dotFree, "/")
+ if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
+ u.Path = "/" + u.Path
+ }
+ // Special case if the last segment was a dot, make sure the path ends with a slash
+ if lastIsDot && !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ }
+ }
+}
+
+func removeDirectoryIndex(u *url.URL) {
+ if len(u.Path) > 0 {
+ u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
+ }
+}
+
+func removeFragment(u *url.URL) {
+ u.Fragment = ""
+}
+
+func forceHTTP(u *url.URL) {
+ if strings.ToLower(u.Scheme) == "https" {
+ u.Scheme = "http"
+ }
+}
+
+func removeDuplicateSlashes(u *url.URL) {
+ if len(u.Path) > 0 {
+ u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
+ }
+}
+
+func removeWWW(u *url.URL) {
+ if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
+ u.Host = u.Host[4:]
+ }
+}
+
+func addWWW(u *url.URL) {
+ if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
+ u.Host = "www." + u.Host
+ }
+}
+
+func sortQuery(u *url.URL) {
+ q := u.Query()
+
+ if len(q) > 0 {
+ arKeys := make([]string, len(q))
+ i := 0
+ for k := range q {
+ arKeys[i] = k
+ i++
+ }
+ sort.Strings(arKeys)
+ buf := new(bytes.Buffer)
+ for _, k := range arKeys {
+ sort.Strings(q[k])
+ for _, v := range q[k] {
+ if buf.Len() > 0 {
+ buf.WriteRune('&')
+ }
+ buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
+ }
+ }
+
+ // Rebuild the raw query string
+ u.RawQuery = buf.String()
+ }
+}
+
+func decodeDWORDHost(u *url.URL) {
+ if len(u.Host) > 0 {
+ if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
+ var parts [4]int64
+
+ dword, _ := strconv.ParseInt(matches[1], 10, 0)
+ for i, shift := range []uint{24, 16, 8, 0} {
+ parts[i] = dword >> shift & 0xFF
+ }
+ u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
+ }
+ }
+}
+
+func decodeOctalHost(u *url.URL) {
+ if len(u.Host) > 0 {
+ if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
+ var parts [4]int64
+
+ for i := 1; i <= 4; i++ {
+ parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
+ }
+ u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
+ }
+ }
+}
+
+func decodeHexHost(u *url.URL) {
+ if len(u.Host) > 0 {
+ if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
+ // Conversion is safe because of regex validation
+ parsed, _ := strconv.ParseInt(matches[1], 16, 0)
+ // Set host as DWORD (base 10) encoded host
+ u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
+ // The rest is the same as decoding a DWORD host
+ decodeDWORDHost(u)
+ }
+ }
+}
+
+func removeUnncessaryHostDots(u *url.URL) {
+ if len(u.Host) > 0 {
+ if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
+ // Trim the leading and trailing dots
+ u.Host = strings.Trim(matches[1], ".")
+ if len(matches) > 2 {
+ u.Host += matches[2]
+ }
+ }
+ }
+}
+
+func removeEmptyPortSeparator(u *url.URL) {
+ if len(u.Host) > 0 {
+ u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml
new file mode 100644
index 0000000..ba6b225
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+
+go:
+ - 1.4.x
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - tip
+
+install:
+ - go build .
+
+script:
+ - go test -v
diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE
new file mode 100644
index 0000000..7448756
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md
new file mode 100644
index 0000000..57aff0a
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/README.md
@@ -0,0 +1,16 @@
+urlesc [Build Status](https://travis-ci.org/PuerkitoBio/urlesc) [GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc)
+======
+
+Package urlesc implements query escaping as per RFC 3986.
+
+It contains some parts of the net/url package, modified so as to allow
+some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).
+
+## Install
+
+ go get github.com/PuerkitoBio/urlesc
+
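+## Example
+
+A minimal usage sketch (the `main` wrapper and the sample values are illustrative; only the exported `QueryEscape` and `Escape` functions are used):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/PuerkitoBio/urlesc"
+)
+
+func main() {
+	// Escape a value for use in a URL query; space becomes '+'.
+	fmt.Println(urlesc.QueryEscape("key=a&b c")) // key%3Da%26b+c
+
+	// Reassemble a parsed URL, escaping the path as per RFC 3986.
+	u, err := url.Parse("http://example.com/a b?q=1#frag")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(urlesc.Escape(u)) // e.g. http://example.com/a%20b?q=1#frag
+}
+```
+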
+## License
+
+Go license (BSD-3-Clause)
+
diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go
new file mode 100644
index 0000000..1b84624
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go
@@ -0,0 +1,180 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package urlesc implements query escaping as per RFC 3986.
+// It contains some parts of the net/url package, modified so as to allow
+// some reserved characters incorrectly escaped by net/url.
+// See https://github.com/golang/go/issues/5684
+package urlesc
+
+import (
+ "bytes"
+ "net/url"
+ "strings"
+)
+
+type encoding int
+
+const (
+ encodePath encoding = 1 + iota
+ encodeUserPassword
+ encodeQueryComponent
+ encodeFragment
+)
+
+// Return true if the specified character should be escaped when
+// appearing in a URL string, according to RFC 3986.
+func shouldEscape(c byte, mode encoding) bool {
+ // §2.3 Unreserved characters (alphanum)
+ if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
+ return false
+ }
+
+ switch c {
+ case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
+ return false
+
+ // §2.2 Reserved characters (reserved)
+ case ':', '/', '?', '#', '[', ']', '@', // gen-delims
+ '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
+ // Different sections of the URL allow a few of
+ // the reserved characters to appear unescaped.
+ switch mode {
+ case encodePath: // §3.3
+ // The RFC allows sub-delims and : @.
+ // '/', '[' and ']' can be used to assign meaning to individual path
+ // segments. This package only manipulates the path as a whole,
+ // so we allow those as well. That leaves only ? and # to escape.
+ return c == '?' || c == '#'
+
+ case encodeUserPassword: // §3.2.1
+ // The RFC allows : and sub-delims in
+ // userinfo. The parsing of userinfo treats ':' as special so we must escape
+ // all the gen-delims.
+ return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
+
+ case encodeQueryComponent: // §3.4
+ // The RFC allows / and ?.
+ return c != '/' && c != '?'
+
+ case encodeFragment: // §4.1
+ // The RFC text is silent but the grammar allows
+ // everything, so escape nothing but #
+ return c == '#'
+ }
+ }
+
+ // Everything else must be escaped.
+ return true
+}
+
+// QueryEscape escapes the string so it can be safely placed
+// inside a URL query.
+func QueryEscape(s string) string {
+ return escape(s, encodeQueryComponent)
+}
+
+func escape(s string, mode encoding) string {
+ spaceCount, hexCount := 0, 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(c, mode) {
+ if c == ' ' && mode == encodeQueryComponent {
+ spaceCount++
+ } else {
+ hexCount++
+ }
+ }
+ }
+
+ if spaceCount == 0 && hexCount == 0 {
+ return s
+ }
+
+ t := make([]byte, len(s)+2*hexCount)
+ j := 0
+ for i := 0; i < len(s); i++ {
+ switch c := s[i]; {
+ case c == ' ' && mode == encodeQueryComponent:
+ t[j] = '+'
+ j++
+ case shouldEscape(c, mode):
+ t[j] = '%'
+ t[j+1] = "0123456789ABCDEF"[c>>4]
+ t[j+2] = "0123456789ABCDEF"[c&15]
+ j += 3
+ default:
+ t[j] = s[i]
+ j++
+ }
+ }
+ return string(t)
+}
+
+var uiReplacer = strings.NewReplacer(
+ "%21", "!",
+ "%27", "'",
+ "%28", "(",
+ "%29", ")",
+ "%2A", "*",
+)
+
+// unescapeUserinfo unescapes some characters that need not be escaped, as per RFC 3986.
+func unescapeUserinfo(s string) string {
+ return uiReplacer.Replace(s)
+}
+
+// Escape reassembles the URL into a valid URL string.
+// The general form of the result is one of:
+//
+// scheme:opaque
+// scheme://userinfo@host/path?query#fragment
+//
+// If u.Opaque is non-empty, String uses the first form;
+// otherwise it uses the second form.
+//
+// In the second form, the following rules apply:
+// - if u.Scheme is empty, scheme: is omitted.
+// - if u.User is nil, userinfo@ is omitted.
+// - if u.Host is empty, host/ is omitted.
+// - if u.Scheme and u.Host are empty and u.User is nil,
+// the entire scheme://userinfo@host/ is omitted.
+// - if u.Host is non-empty and u.Path begins with a /,
+// the form host/path does not add its own /.
+// - if u.RawQuery is empty, ?query is omitted.
+// - if u.Fragment is empty, #fragment is omitted.
+func Escape(u *url.URL) string {
+ var buf bytes.Buffer
+ if u.Scheme != "" {
+ buf.WriteString(u.Scheme)
+ buf.WriteByte(':')
+ }
+ if u.Opaque != "" {
+ buf.WriteString(u.Opaque)
+ } else {
+ if u.Scheme != "" || u.Host != "" || u.User != nil {
+ buf.WriteString("//")
+ if ui := u.User; ui != nil {
+ buf.WriteString(unescapeUserinfo(ui.String()))
+ buf.WriteByte('@')
+ }
+ if h := u.Host; h != "" {
+ buf.WriteString(h)
+ }
+ }
+ if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
+ buf.WriteByte('/')
+ }
+ buf.WriteString(escape(u.Path, encodePath))
+ }
+ if u.RawQuery != "" {
+ buf.WriteByte('?')
+ buf.WriteString(u.RawQuery)
+ }
+ if u.Fragment != "" {
+ buf.WriteByte('#')
+ buf.WriteString(escape(u.Fragment, encodeFragment))
+ }
+ return buf.String()
+}
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.gitignore b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.gitignore
new file mode 100644
index 0000000..a4d184e
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.gitignore
@@ -0,0 +1,16 @@
+# Binaries for programs and plugins
+refdocs
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# goreleaser output
+dist
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.goreleaser.yml b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.goreleaser.yml
new file mode 100644
index 0000000..f1cacca
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.goreleaser.yml
@@ -0,0 +1,24 @@
+builds:
+ - env:
+ - CGO_ENABLED=0
+ # travis ci currently sets GOPATH even with go1.11.
+ # force-setting GO111MODULE=on to use vgo
+ - GO111MODULE=on
+ goos:
+ - linux
+ - darwin
+ goarch:
+ - amd64
+archive:
+ name_template: "{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}"
+ files:
+ - LICENSE
+ - template/**
+ - example-config.json
+checksum:
+ name_template: "checksums.txt"
+changelog:
+ skip: true
+release:
+ # releases are uploaded to github by .travis.yml
+ disable: true
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.travis.yml b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.travis.yml
new file mode 100644
index 0000000..d6845ba
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/.travis.yml
@@ -0,0 +1,32 @@
+language: go
+go:
+ - 1.11.x
+install:
+ - echo noop
+before_script:
+ # travis ci currently sets GOPATH even with go1.11.
+ # force-setting GO111MODULE=on to use vgo
+ - env GO111MODULE=on go mod download
+script:
+ # travis ci currently sets GOPATH even with go1.11.
+ # force-setting GO111MODULE=on to use vgo
+ - env GO111MODULE=on go build -v -o /dev/null
+deploy:
+ # use goreleaser to prepare dist/
+ - provider: script
+ skip_cleanup: true
+ on:
+ tags: true
+ script: curl -sL https://git.io/goreleaser | bash
+ # use github release feature to upload dist/
+ - provider: releases
+ skip_cleanup: true
+ on:
+ tags: true
+ file_glob: true
+ file:
+ - dist/*.tar.gz
+ - dist/*.zip
+ - dist/checksums.txt
+ api_key:
+ secure: r1GMgbVDnZTUcny/PbIATW9dXGOTpm2U9iEGaWvpprMO2AGo7ju7SWEJWtjcap3pc0YasyR2/eon9LC0scWY0Xlpeb+g0pRCQ39FABk1Vo3DpmIPRUCFFkaescWmrWDj3ImzjJgZjCewwK6Fo8s8ngnqIlZnE1Hq6ls2xDp6jNVf+Pn7LyqxkK4axFFSPQM9zFX3N1PVUH5RT03bIJfojJZguqnhNfyTvKvHJidoeWU/Ie+fXc4AdPHyP85xrmGHYl68O0HziU6JCLXira8r1FjUgVeYFYC5nnNuylszO6JWqWh1nXYDxs5FGPnZd9N8bEi/2ahiqms8eV7S+/DGzhSoEdHikcBxTgJpZP2VOmvRSITyv3RleJzCeMULTGFQodoxRgA/Q8qZySvInNjstiBjV2Pyucrnn990XQbN8rIV4RmNggJvbAwJNCGjCwS2eB42EKNCODTuzHPbIV0ap4EjvfBBo0cZ2J9M2Q6VzdpNErdntpM1hZl9yymv3MNN4hOiLQKkofoo/QI3cffB8Y0PBPAL8Cs9Mx1bbx+Dr8iitTHBUAt4a5DHFen4MS8znrZ+Cr4kLDD9QPJ8G0oh4tDKq8CJ73Gt+xqkLZEuka0W1awz9essqE7MH20kRJbKa5woTIs0v9njHMpbeqd7KrNV+1e5F5aPRQyiCzaom7c=
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/LICENSE b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/README.md b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/README.md
new file mode 100644
index 0000000..e3b7603
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/README.md
@@ -0,0 +1,78 @@
+# Kubernetes Custom Resource API Reference Docs generator
+
+If you have a project that uses Custom Resource Definitions and you want to generate
+API reference docs [like this][ar], this tool is for you.
+
+[ar]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/
+
+## Current Users
+
+- [**Knative** API reference docs](https://www.knative.dev/docs/reference/)
+- [**Kubeflow** API reference docs](https://www.kubeflow.org/docs/reference/overview/)
+- [**Agones** API reference docs](https://agones.dev/site/docs/reference/agones_crd_api_reference/)
+- [**cert-manager** API reference docs](https://cert-manager.io/docs/reference/api-docs/)
+- _[[ADD YOUR PROJECT HERE]]_
+
+## Why
+
+Normally you would want to use the same [docs generator][dg] as [Kubernetes API
+reference][ar], but here's why I wrote a different parser/generator:
+
+1. Today, Kubernetes API [does not][pr] provide OpenAPI specs for CRDs (e.g.
+ Knative), therefore the [gen-apidocs][ga]
+ generator used by Kubernetes won't work.
+
+2. Even when Kubernetes API starts providing OpenAPI specs for CRDs, your CRD
+ must have a validation schema (e.g. Knative API doesn't!)
+
+3. Kubernetes [gen-apidocs][ga] parser relies on running a `kube-apiserver` and
+ calling `/apis` endpoint to get OpenAPI specs to generate docs. **This tool
+ doesn't need that!**
+
+[dg]: https://github.com/kubernetes-incubator/reference-docs/
+[ga]: https://github.com/kubernetes-incubator/reference-docs/tree/master/gen-apidocs/generators
+[pr]: https://github.com/kubernetes/kubernetes/pull/71192
+
+## How
+
+This is a custom API reference docs generator that uses the
+[k8s.io/gengo](https://godoc.org/k8s.io/gengo) project to parse types and
+generate API documentation from it.
+
+Capabilities of this tool include:
+
+- Doesn't depend on OpenAPI specs, kube-apiserver, or a running cluster.
+- Relies only on the Go source code (pkg/apis/**/*.go) to parse API types.
+- Can link to other sites for external APIs. For example, if your types have a
+ reference to Kubernetes core/v1.PodSpec, you can link to it.
+- [Configurable](./example-config.json) settings to hide certain fields or types
+ entirely from the generated output.
+- Either output to a file or start a live HTTP server for rapid iteration (see
+  the alternative invocation after the "Try it out" steps below).
+- Supports markdown rendering from godoc type, package and field comments.
+
+## Try it out
+
+1. Clone this repository.
+
+2. Make sure you have go1.11+ installed. Then run `go build`; you should get a
+   `refdocs` binary executable.
+
+3. Clone a Knative repository, set GOPATH correctly,
+ and call the compiled binary within that directory.
+
+ ```sh
+ # go into a repository root with GOPATH set. (I use my own script
+ # goclone(1) to have a separate GOPATH for each repo I clone.)
+ $ goclone knative/build
+
+ $ /path/to/refdocs \
+ -config "/path/to/example-config.json" \
+ -api-dir "github.com/knative/build/pkg/apis/build/v1alpha1" \
+ -out-file docs.html
+ ```
+
+4. Visit `docs.html` to view the results.
+
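+Alternatively, instead of writing a file, you can serve the rendered docs over
+HTTP for rapid iteration. The sketch below uses the `-http-addr` flag in place
+of `-out-file`; the paths are the same illustrative ones as in step 3:
+
+```sh
+$ /path/to/refdocs \
+    -config "/path/to/example-config.json" \
+    -api-dir "github.com/knative/build/pkg/apis/build/v1alpha1" \
+    -http-addr ":8080"
+```
+
+Then open `http://localhost:8080` in a browser; the output is re-rendered on
+every request.
+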
+-----
+
+This is not an official Google project. See [LICENSE](./LICENSE).
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/example-config.json b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/example-config.json
new file mode 100644
index 0000000..e5fdbc7
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/example-config.json
@@ -0,0 +1,28 @@
+{
+ "hideMemberFields": [
+ "TypeMeta"
+ ],
+ "hideTypePatterns": [
+ "ParseError$",
+ "List$"
+ ],
+ "externalPackages": [
+ {
+ "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$",
+ "docsURLTemplate": "https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration"
+ },
+ {
+ "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/",
+ "docsURLTemplate": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}"
+ },
+ {
+ "typeMatchPrefix": "^github\\.com/knative/pkg/apis/duck/",
+ "docsURLTemplate": "https://godoc.org/github.com/knative/pkg/apis/duck/{{arrIndex .PackageSegments -1}}#{{.TypeIdentifier}}"
+ }
+ ],
+ "typeDisplayNamePrefixOverrides": {
+ "k8s.io/api/": "Kubernetes ",
+ "k8s.io/apimachinery/pkg/apis/": "Kubernetes "
+ },
+ "markdownDisabled": false
+}
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.mod b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.mod
new file mode 100644
index 0000000..a42a99a
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.mod
@@ -0,0 +1,12 @@
+module github.com/ahmetb/gen-crd-api-reference-docs
+
+require (
+ github.com/pkg/errors v0.8.1
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/russross/blackfriday/v2 v2.0.1
+ github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
+ github.com/spf13/pflag v1.0.3 // indirect
+ golang.org/x/tools v0.0.0-20190213192042-740235f6c0d8 // indirect
+ k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6
+ k8s.io/klog v0.2.0
+)
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.sum b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.sum
new file mode 100644
index 0000000..0378ec0
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/go.sum
@@ -0,0 +1,23 @@
+github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+golang.org/x/tools v0.0.0-20181221235234-d00ac6d27372 h1:zWPUEY/PjVHT+zO3L8OfkjrtIjf55joTxn/RQP/AjOI=
+golang.org/x/tools v0.0.0-20181221235234-d00ac6d27372/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190213192042-740235f6c0d8/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+k8s.io/gengo v0.0.0-20181113154421-fd15ee9cc2f7 h1:zjNgw2qqBQmKd0S59lGZBQqFxJqUZroVbDphfnVm5do=
+k8s.io/gengo v0.0.0-20181113154421-fd15ee9cc2f7/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog v0.1.0 h1:I5HMfc/DtuVaGR1KPwUrTc476K8NCqNBldC7H4dYEzk=
+k8s.io/klog v0.1.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.2.0 h1:0ElL0OHzF3N+OhoJTL0uca20SxtYt4X4+bzHeqrB83c=
+k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
diff --git a/vendor/github.com/ahmetb/gen-crd-api-reference-docs/main.go b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/main.go
new file mode 100644
index 0000000..33d0858
--- /dev/null
+++ b/vendor/github.com/ahmetb/gen-crd-api-reference-docs/main.go
@@ -0,0 +1,628 @@
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "html/template"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ texttemplate "text/template"
+ "time"
+ "unicode"
+
+ "github.com/pkg/errors"
+ "github.com/russross/blackfriday/v2"
+ "k8s.io/gengo/parser"
+ "k8s.io/gengo/types"
+ "k8s.io/klog"
+)
+
+var (
+ flConfig = flag.String("config", "", "path to config file")
+ flAPIDir = flag.String("api-dir", "", "api directory (or import path), point this to pkg/apis")
+ flTemplateDir = flag.String("template-dir", "template", "path to template/ dir")
+
+ flHTTPAddr = flag.String("http-addr", "", "start an HTTP server on specified addr to view the result (e.g. :8080)")
+ flOutFile = flag.String("out-file", "", "path to output file to save the result")
+)
+
+const (
+ docCommentForceIncludes = "// +gencrdrefdocs:force"
+)
+
+type generatorConfig struct {
+ // HiddenMemberFields hides fields with specified names on all types.
+ HiddenMemberFields []string `json:"hideMemberFields"`
+
+ // HideTypePatterns hides types matching the specified patterns from the
+ // output.
+ HideTypePatterns []string `json:"hideTypePatterns"`
+
+ // ExternalPackages lists recognized external package references and how to
+ // link to them.
+ ExternalPackages []externalPackage `json:"externalPackages"`
+
+ // TypeDisplayNamePrefixOverrides is a mapping of how to override displayed
+ // name for types with certain prefixes with what value.
+ TypeDisplayNamePrefixOverrides map[string]string `json:"typeDisplayNamePrefixOverrides"`
+
+ // MarkdownDisabled controls markdown rendering for comment lines.
+ MarkdownDisabled bool `json:"markdownDisabled"`
+}
+
+type externalPackage struct {
+ TypeMatchPrefix string `json:"typeMatchPrefix"`
+ DocsURLTemplate string `json:"docsURLTemplate"`
+}
+
+type apiPackage struct {
+ apiGroup string
+ apiVersion string
+ GoPackages []*types.Package
+ Types []*types.Type // because multiple 'types.Package's can add types to an apiVersion
+}
+
+func (v *apiPackage) identifier() string { return fmt.Sprintf("%s/%s", v.apiGroup, v.apiVersion) }
+
+func init() {
+ klog.InitFlags(nil)
+ flag.Set("alsologtostderr", "true") // for klog
+ flag.Parse()
+
+ if *flConfig == "" {
+ panic("-config not specified")
+ }
+ if *flAPIDir == "" {
+ panic("-api-dir not specified")
+ }
+ if *flHTTPAddr == "" && *flOutFile == "" {
+ panic("-out-file or -http-addr must be specified")
+ }
+ if *flHTTPAddr != "" && *flOutFile != "" {
+ panic("only -out-file or -http-addr can be specified")
+ }
+ if err := resolveTemplateDir(*flTemplateDir); err != nil {
+ panic(err)
+ }
+
+}
+
+func resolveTemplateDir(dir string) error {
+ path, err := filepath.Abs(dir)
+ if err != nil {
+ return err
+ }
+ if fi, err := os.Stat(path); err != nil {
+ return errors.Wrapf(err, "cannot read the %s directory", path)
+ } else if !fi.IsDir() {
+ return errors.Errorf("%s path is not a directory", path)
+ }
+ return nil
+}
+
+func main() {
+ defer klog.Flush()
+
+ f, err := os.Open(*flConfig)
+ if err != nil {
+ klog.Fatalf("failed to open config file: %+v", err)
+ }
+ d := json.NewDecoder(f)
+ d.DisallowUnknownFields()
+ var config generatorConfig
+ if err := d.Decode(&config); err != nil {
+ klog.Fatalf("failed to parse config file: %+v", err)
+ }
+
+ klog.Infof("parsing go packages in directory %s", *flAPIDir)
+ pkgs, err := parseAPIPackages(*flAPIDir)
+ if err != nil {
+ klog.Fatal(err)
+ }
+ if len(pkgs) == 0 {
+ klog.Fatalf("no API packages found in %s", *flAPIDir)
+ }
+
+ apiPackages, err := combineAPIPackages(pkgs)
+ if err != nil {
+ klog.Fatal(err)
+ }
+
+ mkOutput := func() (string, error) {
+ var b bytes.Buffer
+ err := render(&b, apiPackages, config)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to render the result")
+ }
+
+		// strip leading whitespace from each HTML line so that markdown
+		// renderers don't treat the indented output as code blocks
+ s := regexp.MustCompile(`(?m)^\s+`).ReplaceAllString(b.String(), "")
+ return s, nil
+ }
+
+ if *flOutFile != "" {
+ dir := filepath.Dir(*flOutFile)
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ klog.Fatalf("failed to create dir %s: %v", dir, err)
+ }
+ s, err := mkOutput()
+ if err != nil {
+ klog.Fatalf("failed: %+v", err)
+ }
+ if err := ioutil.WriteFile(*flOutFile, []byte(s), 0644); err != nil {
+ klog.Fatalf("failed to write to out file: %v", err)
+ }
+ klog.Infof("written to %s", *flOutFile)
+ }
+
+ if *flHTTPAddr != "" {
+ h := func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ defer func() { klog.Infof("request took %v", time.Since(now)) }()
+ s, err := mkOutput()
+ if err != nil {
+ fmt.Fprintf(w, "error: %+v", err)
+ klog.Warningf("failed: %+v", err)
+ }
+ if _, err := fmt.Fprint(w, s); err != nil {
+ klog.Warningf("response write error: %v", err)
+ }
+ }
+ http.HandleFunc("/", h)
+ klog.Infof("server listening at %s", *flHTTPAddr)
+ klog.Fatal(http.ListenAndServe(*flHTTPAddr, nil))
+ }
+}
+
+// groupName extracts the "//+groupName" meta-comment from the specified
+// package's comments, or returns empty string if it cannot be found.
+func groupName(pkg *types.Package) string {
+ m := types.ExtractCommentTags("+", pkg.Comments)
+ v := m["groupName"]
+ if len(v) == 1 {
+ return v[0]
+ }
+ return ""
+}
+
+func parseAPIPackages(dir string) ([]*types.Package, error) {
+ b := parser.New()
+ // the following will silently fail (turn on -v=4 to see logs)
+	if err := b.AddDirRecursive(dir); err != nil {
+ return nil, err
+ }
+ scan, err := b.FindTypes()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to parse pkgs and types")
+ }
+ var pkgNames []string
+ for p := range scan {
+ pkg := scan[p]
+ klog.V(3).Infof("trying package=%v groupName=%s", p, groupName(pkg))
+
+ // Do not pick up packages that are in vendor/ as API packages. (This
+ // happened in knative/eventing-sources/vendor/..., where a package
+ // matched the pattern, but it didn't have a compatible import path).
+ if isVendorPackage(pkg) {
+ klog.V(3).Infof("package=%v coming from vendor/, ignoring.", p)
+ continue
+ }
+
+ if groupName(pkg) != "" && len(pkg.Types) > 0 || containsString(pkg.DocComments, docCommentForceIncludes) {
+ klog.V(3).Infof("package=%v has groupName and has types", p)
+ pkgNames = append(pkgNames, p)
+ }
+ }
+ sort.Strings(pkgNames)
+ var pkgs []*types.Package
+ for _, p := range pkgNames {
+ klog.Infof("using package=%s", p)
+ pkgs = append(pkgs, scan[p])
+ }
+ return pkgs, nil
+}
+
+func containsString(sl []string, str string) bool {
+ for _, s := range sl {
+ if str == s {
+ return true
+ }
+ }
+ return false
+}
+
+// combineAPIPackages groups the Go packages by the apiGroup/apiVersion they
+// offer, and combines the types in them.
+func combineAPIPackages(pkgs []*types.Package) ([]*apiPackage, error) {
+ pkgMap := make(map[string]*apiPackage)
+
+ for _, pkg := range pkgs {
+ apiGroup, apiVersion, err := apiVersionForPackage(pkg)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not get apiVersion for package %s", pkg.Path)
+ }
+
+ typeList := make([]*types.Type, 0, len(pkg.Types))
+ for _, t := range pkg.Types {
+ typeList = append(typeList, t)
+ }
+
+ id := fmt.Sprintf("%s/%s", apiGroup, apiVersion)
+ v, ok := pkgMap[id]
+ if !ok {
+ pkgMap[id] = &apiPackage{
+ apiGroup: apiGroup,
+ apiVersion: apiVersion,
+ Types: typeList,
+ GoPackages: []*types.Package{pkg},
+ }
+ } else {
+ v.Types = append(v.Types, typeList...)
+ v.GoPackages = append(v.GoPackages, pkg)
+ }
+ }
+ out := make([]*apiPackage, 0, len(pkgMap))
+ for _, v := range pkgMap {
+ out = append(out, v)
+ }
+ return out, nil
+}
+
+// isVendorPackage determines if package is coming from vendor/ dir.
+func isVendorPackage(pkg *types.Package) bool {
+ vendorPattern := string(os.PathSeparator) + "vendor" + string(os.PathSeparator)
+ return strings.Contains(pkg.SourcePath, vendorPattern)
+}
+
+func findTypeReferences(pkgs []*apiPackage) map[*types.Type][]*types.Type {
+ m := make(map[*types.Type][]*types.Type)
+ for _, pkg := range pkgs {
+ for _, typ := range pkg.Types {
+ for _, member := range typ.Members {
+ t := member.Type
+ t = tryDereference(t)
+ m[t] = append(m[t], typ)
+ }
+ }
+ }
+ return m
+}
+
+func isExportedType(t *types.Type) bool {
+ // TODO(ahmetb) use types.ExtractSingleBoolCommentTag() to parse +genclient
+ // https://godoc.org/k8s.io/gengo/types#ExtractCommentTags
+ return strings.Contains(strings.Join(t.SecondClosestCommentLines, "\n"), "+genclient")
+}
+
+func fieldName(m types.Member) string {
+ v := reflect.StructTag(m.Tags).Get("json")
+ v = strings.TrimSuffix(v, ",omitempty")
+ v = strings.TrimSuffix(v, ",inline")
+ if v != "" {
+ return v
+ }
+ return m.Name
+}
+
+func fieldEmbedded(m types.Member) bool {
+ return strings.Contains(reflect.StructTag(m.Tags).Get("json"), ",inline")
+}
+
+func isLocalType(t *types.Type, typePkgMap map[*types.Type]*apiPackage) bool {
+ t = tryDereference(t)
+ _, ok := typePkgMap[t]
+ return ok
+}
+
+func renderComments(s []string, markdown bool) string {
+ s = filterCommentTags(s)
+ doc := strings.Join(s, "\n")
+
+ if markdown {
+ // TODO(ahmetb): when a comment includes stuff like "http://"
+ // we treat this as a HTML tag with markdown renderer below. solve this.
+ return string(blackfriday.Run([]byte(doc)))
+ }
+ return nl2br(doc)
+}
+
+func safe(s string) template.HTML { return template.HTML(s) }
+
+func nl2br(s string) string {
+	return strings.Replace(s, "\n\n", string(template.HTML("<br/><br/>")), -1)
+}
+
+func hiddenMember(m types.Member, c generatorConfig) bool {
+ for _, v := range c.HiddenMemberFields {
+ if m.Name == v {
+ return true
+ }
+ }
+ return false
+}
+
+func typeIdentifier(t *types.Type) string {
+ t = tryDereference(t)
+ return t.Name.String() // {PackagePath.Name}
+}
+
+// apiGroupForType looks up apiGroup for the given type
+func apiGroupForType(t *types.Type, typePkgMap map[*types.Type]*apiPackage) string {
+ t = tryDereference(t)
+
+ v := typePkgMap[t]
+ if v == nil {
+ klog.Warningf("WARNING: cannot read apiVersion for %s from type=>pkg map", t.Name.String())
+ return ""
+ }
+
+ return v.identifier()
+}
+
+// anchorIDForLocalType returns the #anchor string for the local type
+func anchorIDForLocalType(t *types.Type, typePkgMap map[*types.Type]*apiPackage) string {
+ return fmt.Sprintf("%s.%s", apiGroupForType(t, typePkgMap), t.Name.Name)
+}
+
+// linkForType returns an anchor to the type if it can be generated. returns
+// empty string if it is not a local type or unrecognized external type.
+func linkForType(t *types.Type, c generatorConfig, typePkgMap map[*types.Type]*apiPackage) (string, error) {
+ t = tryDereference(t) // dereference kind=Pointer
+
+ if isLocalType(t, typePkgMap) {
+ return "#" + anchorIDForLocalType(t, typePkgMap), nil
+ }
+
+ var arrIndex = func(a []string, i int) string {
+ return a[(len(a)+i)%len(a)]
+ }
+
+ // types like k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta,
+ // k8s.io/api/core/v1.Container, k8s.io/api/autoscaling/v1.CrossVersionObjectReference,
+ // github.com/knative/build/pkg/apis/build/v1alpha1.BuildSpec
+ if t.Kind == types.Struct || t.Kind == types.Pointer || t.Kind == types.Interface || t.Kind == types.Alias {
+ id := typeIdentifier(t) // gives {{ImportPath.Identifier}} for type
+ segments := strings.Split(t.Name.Package, "/") // to parse [meta, v1] from "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ for _, v := range c.ExternalPackages {
+ r, err := regexp.Compile(v.TypeMatchPrefix)
+ if err != nil {
+ return "", errors.Wrapf(err, "pattern %q failed to compile", v.TypeMatchPrefix)
+ }
+ if r.MatchString(id) {
+ tpl, err := texttemplate.New("").Funcs(map[string]interface{}{
+ "lower": strings.ToLower,
+ "arrIndex": arrIndex,
+ }).Parse(v.DocsURLTemplate)
+ if err != nil {
+ return "", errors.Wrap(err, "docs URL template failed to parse")
+ }
+
+ var b bytes.Buffer
+ if err := tpl.
+ Execute(&b, map[string]interface{}{
+ "TypeIdentifier": t.Name.Name,
+ "PackagePath": t.Name.Package,
+ "PackageSegments": segments,
+ }); err != nil {
+ return "", errors.Wrap(err, "docs url template execution error")
+ }
+ return b.String(), nil
+ }
+ }
+		klog.Warningf("external link source not found for type %v", t.Name)
+ }
+ return "", nil
+}
+
+// tryDereference returns the underlying type when t is a pointer, map, or slice.
+func tryDereference(t *types.Type) *types.Type {
+ if t.Elem != nil {
+ return t.Elem
+ }
+ return t
+}
+
+func typeDisplayName(t *types.Type, c generatorConfig, typePkgMap map[*types.Type]*apiPackage) string {
+ s := typeIdentifier(t)
+ if isLocalType(t, typePkgMap) {
+ s = tryDereference(t).Name.Name
+ }
+ if t.Kind == types.Pointer {
+ s = strings.TrimLeft(s, "*")
+ }
+
+ switch t.Kind {
+ case types.Struct,
+ types.Interface,
+ types.Alias,
+ types.Pointer,
+ types.Slice,
+ types.Builtin:
+ // noop
+ case types.Map:
+ // return original name
+ return t.Name.Name
+ default:
+ klog.Fatalf("type %s has kind=%v which is unhandled", t.Name, t.Kind)
+ }
+
+ // substitute prefix, if registered
+ for prefix, replacement := range c.TypeDisplayNamePrefixOverrides {
+ if strings.HasPrefix(s, prefix) {
+ s = strings.Replace(s, prefix, replacement, 1)
+ }
+ }
+
+ if t.Kind == types.Slice {
+ s = "[]" + s
+ }
+
+ return s
+}
+
+func hideType(t *types.Type, c generatorConfig) bool {
+ for _, pattern := range c.HideTypePatterns {
+ if regexp.MustCompile(pattern).MatchString(t.Name.String()) {
+ return true
+ }
+ }
+ if !isExportedType(t) && unicode.IsLower(rune(t.Name.Name[0])) {
+ // types that start with lowercase
+ return true
+ }
+ return false
+}
+
+func typeReferences(t *types.Type, c generatorConfig, references map[*types.Type][]*types.Type) []*types.Type {
+ var out []*types.Type
+ m := make(map[*types.Type]struct{})
+ for _, ref := range references[t] {
+ if !hideType(ref, c) {
+ m[ref] = struct{}{}
+ }
+ }
+ for k := range m {
+ out = append(out, k)
+ }
+ sortTypes(out)
+ return out
+}
+
+func sortTypes(typs []*types.Type) []*types.Type {
+ sort.Slice(typs, func(i, j int) bool {
+ t1, t2 := typs[i], typs[j]
+ if isExportedType(t1) && !isExportedType(t2) {
+ return true
+ } else if !isExportedType(t1) && isExportedType(t2) {
+ return false
+ }
+ return t1.Name.Name < t2.Name.Name
+ })
+ return typs
+}
+
+func visibleTypes(in []*types.Type, c generatorConfig) []*types.Type {
+ var out []*types.Type
+ for _, t := range in {
+ if !hideType(t, c) {
+ out = append(out, t)
+ }
+ }
+ return out
+}
+
+func packageDisplayName(pkg *types.Package, apiVersions map[string]string) string {
+ apiGroupVersion, ok := apiVersions[pkg.Path]
+ if ok {
+ return apiGroupVersion
+ }
+ return pkg.Path // go import path
+}
+
+func filterCommentTags(comments []string) []string {
+ var out []string
+ for _, v := range comments {
+ if !strings.HasPrefix(strings.TrimSpace(v), "+") {
+ out = append(out, v)
+ }
+ }
+ return out
+}
+
+func isOptionalMember(m types.Member) bool {
+ tags := types.ExtractCommentTags("+", m.CommentLines)
+ _, ok := tags["optional"]
+ return ok
+}
+
+func apiVersionForPackage(pkg *types.Package) (string, string, error) {
+ group := groupName(pkg)
+ version := pkg.Name // assumes basename (i.e. "v1" in "core/v1") is apiVersion
+ r := `^v\d+((alpha|beta)\d+)?$`
+ if !regexp.MustCompile(r).MatchString(version) {
+ return "", "", errors.Errorf("cannot infer kubernetes apiVersion of go package %s (basename %q doesn't match expected pattern %s that's used to determine apiVersion)", pkg.Path, version, r)
+ }
+ return group, version, nil
+}
+
+// extractTypeToPackageMap creates a map from each *types.Type to its apiPackage.
+func extractTypeToPackageMap(pkgs []*apiPackage) map[*types.Type]*apiPackage {
+ out := make(map[*types.Type]*apiPackage)
+ for _, ap := range pkgs {
+ for _, t := range ap.Types {
+ out[t] = ap
+ }
+ }
+ return out
+}
+
+// packageMapToList flattens the map.
+func packageMapToList(pkgs map[string]*apiPackage) []*apiPackage {
+ // TODO(ahmetb): we should probably not deal with maps, this type can be
+ // a list everywhere.
+ out := make([]*apiPackage, 0, len(pkgs))
+ for _, v := range pkgs {
+ out = append(out, v)
+ }
+ return out
+}
+
+func render(w io.Writer, pkgs []*apiPackage, config generatorConfig) error {
+ references := findTypeReferences(pkgs)
+ typePkgMap := extractTypeToPackageMap(pkgs)
+
+ t, err := template.New("").Funcs(map[string]interface{}{
+ "isExportedType": isExportedType,
+ "fieldName": fieldName,
+ "fieldEmbedded": fieldEmbedded,
+ "typeIdentifier": func(t *types.Type) string { return typeIdentifier(t) },
+ "typeDisplayName": func(t *types.Type) string { return typeDisplayName(t, config, typePkgMap) },
+ "visibleTypes": func(t []*types.Type) []*types.Type { return visibleTypes(t, config) },
+ "renderComments": func(s []string) string { return renderComments(s, !config.MarkdownDisabled) },
+ "packageDisplayName": func(p *apiPackage) string { return p.identifier() },
+ "apiGroup": func(t *types.Type) string { return apiGroupForType(t, typePkgMap) },
+ "packageAnchorID": func(p *apiPackage) string {
+ // TODO(ahmetb): currently this is the same as packageDisplayName
+			// func, and that's fine since it returns valid DOM id strings like
+			// 'serving.knative.dev/v1alpha1', which are valid per HTML5 except for
+			// spaces, so just trim those.
+ return strings.Replace(p.identifier(), " ", "", -1)
+ },
+ "linkForType": func(t *types.Type) string {
+ v, err := linkForType(t, config, typePkgMap)
+ if err != nil {
+ klog.Fatal(errors.Wrapf(err, "error getting link for type=%s", t.Name))
+ return ""
+ }
+ return v
+ },
+ "anchorIDForType": func(t *types.Type) string { return anchorIDForLocalType(t, typePkgMap) },
+ "safe": safe,
+ "sortedTypes": sortTypes,
+ "typeReferences": func(t *types.Type) []*types.Type { return typeReferences(t, config, references) },
+ "hiddenMember": func(m types.Member) bool { return hiddenMember(m, config) },
+ "isLocalType": isLocalType,
+ "isOptionalMember": isOptionalMember,
+ }).ParseGlob(filepath.Join(*flTemplateDir, "*.tpl"))
+ if err != nil {
+ return errors.Wrap(err, "parse error")
+ }
+
+ gitCommit, _ := exec.Command("git", "rev-parse", "--short", "HEAD").Output()
+ return errors.Wrap(t.ExecuteTemplate(w, "packages", map[string]interface{}{
+ "packages": pkgs,
+ "config": config,
+ "gitCommit": strings.TrimSpace(string(gitCommit)),
+ }), "template execution error")
+}
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 0000000..339177b
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 0000000..1602287
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 0000000..d7d14f8
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,316 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
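+//
+// A minimal usage sketch (the target quantiles, error bounds, and values below
+// are illustrative only):
+//
+//	s := quantile.NewTargeted(map[float64]float64{0.5: 0.05, 0.99: 0.001})
+//	for _, v := range []float64{12, 7, 42, 3} {
+//		s.Insert(v)
+//	}
+//	p99 := s.Query(0.99)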
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+ // Convert map to slice to avoid slow iterations on a map.
+ // ƒ is called on the hot path, so converting the map to a slice
+ // beforehand results in significant CPU savings.
+ targets := targetMapToSlice(targetMap)
+
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for _, t := range targets {
+ if t.quantile*s.n <= r {
+ f = (2 * t.epsilon * r) / t.quantile
+ } else {
+ f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+type target struct {
+ quantile float64
+ epsilon float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+ targets := make([]target, 0, len(targetMap))
+
+ for quantile, epsilon := range targetMap {
+ t := target{
+ quantile: quantile,
+ epsilon: epsilon,
+ }
+ targets = append(targets, t)
+ }
+
+ return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed value for the qth quantile. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(math.Ceil(float64(l) * q))
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi -= 1
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml
new file mode 100644
index 0000000..c516ea8
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+ - "1.x"
+ - master
+env:
+ - TAGS=""
+ - TAGS="-tags purego"
+script: go test $TAGS -v ./...
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
new file mode 100644
index 0000000..24b5306
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
new file mode 100644
index 0000000..2fd8693
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -0,0 +1,67 @@
+# xxhash
+
+[GoDoc](https://godoc.org/github.com/cespare/xxhash)
+[Build Status](https://travis-ci.org/cespare/xxhash)
+
+xxhash is a Go implementation of the 64-bit
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+ func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+This package provides a fast pure-Go implementation and an even faster
+assembly implementation for amd64.
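+
+A minimal usage sketch (the inputs are illustrative; the error returns from
+`Write` and `WriteString` are ignored for brevity):
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/cespare/xxhash/v2"
+)
+
+func main() {
+	// One-shot hashing of a byte slice.
+	fmt.Println(xxhash.Sum64([]byte("hello, world")))
+
+	// Streaming hashing via a Digest (implements hash.Hash64).
+	d := xxhash.New()
+	d.WriteString("hello, ")
+	d.Write([]byte("world"))
+
+	// Prints the same value as above, since the streamed input
+	// equals the one-shot input.
+	fmt.Println(d.Sum64())
+}
+```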
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| --- | --- | --- |
+| 5 B | 979.66 MB/s | 1291.17 MB/s |
+| 100 B | 7475.26 MB/s | 7973.40 MB/s |
+| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
+| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+
+These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
+the following commands under Go 1.11.2:
+
+```
+$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
+$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [FreeCache](https://github.com/coocood/freecache)
diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod
new file mode 100644
index 0000000..49f6760
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/go.mod
@@ -0,0 +1,3 @@
+module github.com/cespare/xxhash/v2
+
+go 1.11
diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum
new file mode 100644
index 0000000..e69de29
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
new file mode 100644
index 0000000..db0b35f
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -0,0 +1,236 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+const (
+ prime1 uint64 = 11400714785074694791
+ prime2 uint64 = 14029467366897019727
+ prime3 uint64 = 1609587929392839161
+ prime4 uint64 = 9650029242287828579
+ prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+ prime1v = prime1
+ prime2v = prime2
+ prime3v = prime3
+ prime4v = prime4
+ prime5v = prime5
+)
+
+// Digest implements hash.Hash64.
+type Digest struct {
+ v1 uint64
+ v2 uint64
+ v3 uint64
+ v4 uint64
+ total uint64
+ mem [32]byte
+ n int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+ var d Digest
+ d.Reset()
+ return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+ d.v1 = prime1v + prime2
+ d.v2 = prime2
+ d.v3 = 0
+ d.v4 = -prime1v
+ d.total = 0
+ d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+ n = len(b)
+ d.total += uint64(n)
+
+ if d.n+n < 32 {
+ // This new data doesn't even fill the current block.
+ copy(d.mem[d.n:], b)
+ d.n += n
+ return
+ }
+
+ if d.n > 0 {
+ // Finish off the partial block.
+ copy(d.mem[d.n:], b)
+ d.v1 = round(d.v1, u64(d.mem[0:8]))
+ d.v2 = round(d.v2, u64(d.mem[8:16]))
+ d.v3 = round(d.v3, u64(d.mem[16:24]))
+ d.v4 = round(d.v4, u64(d.mem[24:32]))
+ b = b[32-d.n:]
+ d.n = 0
+ }
+
+ if len(b) >= 32 {
+ // One or more full blocks left.
+ nw := writeBlocks(d, b)
+ b = b[nw:]
+ }
+
+ // Store any remaining partial block.
+ copy(d.mem[:], b)
+ d.n = len(b)
+
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+ s := d.Sum64()
+ return append(
+ b,
+ byte(s>>56),
+ byte(s>>48),
+ byte(s>>40),
+ byte(s>>32),
+ byte(s>>24),
+ byte(s>>16),
+ byte(s>>8),
+ byte(s),
+ )
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+ var h uint64
+
+ if d.total >= 32 {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = d.v3 + prime5
+ }
+
+ h += d.total
+
+ i, end := 0, d.n
+ for ; i+8 <= end; i += 8 {
+ k1 := round(0, u64(d.mem[i:i+8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if i+4 <= end {
+ h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ i += 4
+ }
+ for i < end {
+ h ^= uint64(d.mem[i]) * prime5
+ h = rol11(h) * prime1
+ i++
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+const (
+ magic = "xxh\x06"
+ marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ b = append(b, magic...)
+ b = appendUint64(b, d.v1)
+ b = appendUint64(b, d.v2)
+ b = appendUint64(b, d.v3)
+ b = appendUint64(b, d.v4)
+ b = appendUint64(b, d.total)
+ b = append(b, d.mem[:d.n]...)
+ b = b[:len(b)+len(d.mem)-d.n]
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+ return errors.New("xxhash: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("xxhash: invalid hash state size")
+ }
+ b = b[len(magic):]
+ b, d.v1 = consumeUint64(b)
+ b, d.v2 = consumeUint64(b)
+ b, d.v3 = consumeUint64(b)
+ b, d.v4 = consumeUint64(b)
+ b, d.total = consumeUint64(b)
+ copy(d.mem[:], b)
+ b = b[len(d.mem):]
+ d.n = int(d.total % uint64(len(d.mem)))
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.LittleEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ x := u64(b)
+ return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+ acc += input * prime2
+ acc = rol31(acc)
+ acc *= prime1
+ return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+ val = round(0, val)
+ acc ^= val
+ acc = acc*prime1 + prime4
+ return acc
+}
+
+func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
new file mode 100644
index 0000000..ad14b80
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
@@ -0,0 +1,13 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(d *Digest, b []byte) int
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
new file mode 100644
index 0000000..d580e32
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
@@ -0,0 +1,215 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Register allocation:
+// AX h
+// CX pointer to advance through b
+// DX n
+// BX loop end
+// R8 v1, k1
+// R9 v2
+// R10 v3
+// R11 v4
+// R12 tmp
+// R13 prime1v
+// R14 prime2v
+// R15 prime4v
+
+// round reads from and advances the buffer pointer in CX.
+// It assumes that R13 has prime1v and R14 has prime2v.
+#define round(r) \
+ MOVQ (CX), R12 \
+ ADDQ $8, CX \
+ IMULQ R14, R12 \
+ ADDQ R12, r \
+ ROLQ $31, r \
+ IMULQ R13, r
+
+// mergeRound applies a merge round on the two registers acc and val.
+// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+#define mergeRound(acc, val) \
+ IMULQ R14, val \
+ ROLQ $31, val \
+ IMULQ R13, val \
+ XORQ val, acc \
+ IMULQ R13, acc \
+ ADDQ R15, acc
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT, $0-32
+ // Load fixed primes.
+ MOVQ ·prime1v(SB), R13
+ MOVQ ·prime2v(SB), R14
+ MOVQ ·prime4v(SB), R15
+
+ // Load slice.
+ MOVQ b_base+0(FP), CX
+ MOVQ b_len+8(FP), DX
+ LEAQ (CX)(DX*1), BX
+
+ // The first loop limit will be len(b)-32.
+ SUBQ $32, BX
+
+ // Check whether we have at least one block.
+ CMPQ DX, $32
+ JLT noBlocks
+
+ // Set up initial state (v1, v2, v3, v4).
+ MOVQ R13, R8
+ ADDQ R14, R8
+ MOVQ R14, R9
+ XORQ R10, R10
+ XORQ R11, R11
+ SUBQ R13, R11
+
+ // Loop until CX > BX.
+blockLoop:
+ round(R8)
+ round(R9)
+ round(R10)
+ round(R11)
+
+ CMPQ CX, BX
+ JLE blockLoop
+
+ MOVQ R8, AX
+ ROLQ $1, AX
+ MOVQ R9, R12
+ ROLQ $7, R12
+ ADDQ R12, AX
+ MOVQ R10, R12
+ ROLQ $12, R12
+ ADDQ R12, AX
+ MOVQ R11, R12
+ ROLQ $18, R12
+ ADDQ R12, AX
+
+ mergeRound(AX, R8)
+ mergeRound(AX, R9)
+ mergeRound(AX, R10)
+ mergeRound(AX, R11)
+
+ JMP afterBlocks
+
+noBlocks:
+ MOVQ ·prime5v(SB), AX
+
+afterBlocks:
+ ADDQ DX, AX
+
+ // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+ ADDQ $24, BX
+
+ CMPQ CX, BX
+ JG fourByte
+
+wordLoop:
+ // Calculate k1.
+ MOVQ (CX), R8
+ ADDQ $8, CX
+ IMULQ R14, R8
+ ROLQ $31, R8
+ IMULQ R13, R8
+
+ XORQ R8, AX
+ ROLQ $27, AX
+ IMULQ R13, AX
+ ADDQ R15, AX
+
+ CMPQ CX, BX
+ JLE wordLoop
+
+fourByte:
+ ADDQ $4, BX
+ CMPQ CX, BX
+ JG singles
+
+ MOVL (CX), R8
+ ADDQ $4, CX
+ IMULQ R13, R8
+ XORQ R8, AX
+
+ ROLQ $23, AX
+ IMULQ R14, AX
+ ADDQ ·prime3v(SB), AX
+
+singles:
+ ADDQ $4, BX
+ CMPQ CX, BX
+ JGE finalize
+
+singlesLoop:
+ MOVBQZX (CX), R12
+ ADDQ $1, CX
+ IMULQ ·prime5v(SB), R12
+ XORQ R12, AX
+
+ ROLQ $11, AX
+ IMULQ R13, AX
+
+ CMPQ CX, BX
+ JL singlesLoop
+
+finalize:
+ MOVQ AX, R12
+ SHRQ $33, R12
+ XORQ R12, AX
+ IMULQ R14, AX
+ MOVQ AX, R12
+ SHRQ $29, R12
+ XORQ R12, AX
+ IMULQ ·prime3v(SB), AX
+ MOVQ AX, R12
+ SHRQ $32, R12
+ XORQ R12, AX
+
+ MOVQ AX, ret+24(FP)
+ RET
+
+// writeBlocks uses the same registers as above except that it uses AX to store
+// the d pointer.
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+ // Load fixed primes needed for round.
+ MOVQ ·prime1v(SB), R13
+ MOVQ ·prime2v(SB), R14
+
+ // Load slice.
+ MOVQ b_base+8(FP), CX
+ MOVQ b_len+16(FP), DX
+ LEAQ (CX)(DX*1), BX
+ SUBQ $32, BX
+
+ // Load vN from d.
+ MOVQ d+0(FP), AX
+ MOVQ 0(AX), R8 // v1
+ MOVQ 8(AX), R9 // v2
+ MOVQ 16(AX), R10 // v3
+ MOVQ 24(AX), R11 // v4
+
+ // We don't need to check the loop condition here; this function is
+ // always called with at least one block of data to process.
+blockLoop:
+ round(R8)
+ round(R9)
+ round(R10)
+ round(R11)
+
+ CMPQ CX, BX
+ JLE blockLoop
+
+ // Copy vN back to d.
+ MOVQ R8, 0(AX)
+ MOVQ R9, 8(AX)
+ MOVQ R10, 16(AX)
+ MOVQ R11, 24(AX)
+
+ // The number of bytes written is CX minus the old base pointer.
+ SUBQ b_base+8(FP), CX
+ MOVQ CX, ret+32(FP)
+
+ RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
new file mode 100644
index 0000000..4a5a821
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -0,0 +1,76 @@
+// +build !amd64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+func Sum64(b []byte) uint64 {
+ // A simpler version would be
+ // d := New()
+ // d.Write(b)
+ // return d.Sum64()
+ // but this is faster, particularly for small inputs.
+
+ n := len(b)
+ var h uint64
+
+ if n >= 32 {
+ v1 := prime1v + prime2
+ v2 := prime2
+ v3 := uint64(0)
+ v4 := -prime1v
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = prime5
+ }
+
+ h += uint64(n)
+
+ i, end := 0, len(b)
+ for ; i+8 <= end; i += 8 {
+ k1 := round(0, u64(b[i:i+8:len(b)]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if i+4 <= end {
+ h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+ h = rol23(h)*prime2 + prime3
+ i += 4
+ }
+ for ; i < end; i++ {
+ h ^= uint64(b[i]) * prime5
+ h = rol11(h) * prime1
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
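+
+// exampleDigestEquivalence is an illustrative sketch added by the editor (not
+// part of the upstream library): the streaming Digest created by New() and the
+// one-shot Sum64 above produce the same value for the same input.
+func exampleDigestEquivalence(data []byte) bool {
+ d := New()
+ _, _ = d.Write(data) // Digest.Write always returns len(data), nil.
+ return d.Sum64() == Sum64(data)
+}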
+
+func writeBlocks(d *Digest, b []byte) int {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ n := len(b)
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+ return n - len(b)
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
new file mode 100644
index 0000000..fc9bea7
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+// This file contains the safe implementations of otherwise unsafe-using code.
+
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s.
+func Sum64String(s string) uint64 {
+ return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
new file mode 100644
index 0000000..53bf76e
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -0,0 +1,46 @@
+// +build !appengine
+
+// This file encapsulates usage of unsafe.
+// xxhash_safe.go contains the safe implementations.
+
+package xxhash
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// Notes:
+//
+// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
+// for some discussion about these unsafe conversions.
+//
+// In the future it's possible that compiler optimizations will make these
+// unsafe operations unnecessary: https://golang.org/issue/2205.
+//
+// Both of these wrapper functions still incur function call overhead since they
+// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
+// for strings to squeeze out a bit more speed. Mid-stack inlining should
+// eventually fix this.
+
+// Sum64String computes the 64-bit xxHash digest of s.
+// It may be faster than Sum64([]byte(s)) by avoiding a copy.
+func Sum64String(s string) uint64 {
+ var b []byte
+ bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+ bh.Len = len(s)
+ bh.Cap = len(s)
+ return Sum64(b)
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+// It may be faster than Write([]byte(s)) by avoiding a copy.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ var b []byte
+ bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+ bh.Len = len(s)
+ bh.Cap = len(s)
+ return d.Write(b)
+}
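+
+// exampleSum64String is an illustrative sketch added by the editor (not part
+// of the upstream library): Sum64String avoids the []byte copy that
+// Sum64([]byte(s)) would make, but both return the same digest.
+func exampleSum64String(s string) bool {
+ return Sum64String(s) == Sum64([]byte(s))
+}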
diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
new file mode 100644
index 0000000..3938f38
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
@@ -0,0 +1,19 @@
+# Copyright (C) 2017 SUSE LLC. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+language: go
+go:
+ - 1.7.x
+ - 1.8.x
+ - tip
+
+os:
+ - linux
+ - osx
+
+script:
+ - go test -cover -v ./...
+
+notifications:
+ email: false
diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE
new file mode 100644
index 0000000..bec842f
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE
@@ -0,0 +1,28 @@
+Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
+Copyright (C) 2017 SUSE LLC. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md
new file mode 100644
index 0000000..49b2baa
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/README.md
@@ -0,0 +1,65 @@
+## `filepath-securejoin` ##
+
+[Build Status](https://travis-ci.org/cyphar/filepath-securejoin)
+
+An implementation of `SecureJoin`, a [candidate for inclusion in the Go
+standard library][go#20126]. The purpose of this function is to be a "secure"
+alternative to `filepath.Join`, and in particular it provides certain
+guarantees that are not provided by `filepath.Join`.
+
+This is the function prototype:
+
+```go
+func SecureJoin(root, unsafePath string) (string, error)
+```
+
+This library **guarantees** the following:
+
+* If no error is set, the resulting string **must** be a child path of
+ `root` and will not contain any symlink path components (they will all
+ be expanded).
+
+* When expanding symlinks, all symlink path components **must** be resolved
+ relative to the provided root. In particular, this can be considered a
+ userspace implementation of how `chroot(2)` operates on file paths. Note that
+ these symlinks will **not** be expanded lexically (`filepath.Clean` is not
+ called on the input before processing).
+
+* Non-existent path components are unaffected by `SecureJoin` (similar to
+ `filepath.EvalSymlinks`'s semantics).
+
+* The returned path will always be `filepath.Clean`ed and thus not contain any
+ `..` components.
+
+A (trivial) implementation of this function on GNU/Linux systems could be done
+with the following (note that this requires root privileges and is far more
+opaque than the implementation in this library, and also requires that
+`readlink` is inside the `root` path):
+
+```go
+package securejoin
+
+import (
+ "os/exec"
+ "path/filepath"
+)
+
+func SecureJoin(root, unsafePath string) (string, error) {
+ unsafePath = string(filepath.Separator) + unsafePath
+ cmd := exec.Command("chroot", root,
+ "readlink", "--canonicalize-missing", "--no-newline", unsafePath)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return "", err
+ }
+ expanded := string(output)
+ return filepath.Join(root, expanded), nil
+}
+```
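+
+As a hedged usage sketch (the root path below is purely hypothetical), the
+actual library is called like this:
+
+```go
+resolved, err := securejoin.SecureJoin("/var/lib/containers/rootfs", "../../etc/passwd")
+if err != nil {
+ // handle the error
+}
+// resolved always stays inside the given root, e.g.
+// "/var/lib/containers/rootfs/etc/passwd".
+```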
+
+[go#20126]: https://github.com/golang/go/issues/20126
+
+### License ###
+
+The license of this project is the same as Go, which is a BSD 3-clause license
+available in the `LICENSE` file.
diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION
new file mode 100644
index 0000000..ee1372d
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION
@@ -0,0 +1 @@
+0.2.2
diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go
new file mode 100644
index 0000000..c4ca3d7
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/join.go
@@ -0,0 +1,134 @@
+// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
+// Copyright (C) 2017 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package securejoin is an implementation of the hopefully-soon-to-be-included
+// SecureJoin helper that is meant to be part of the "path/filepath" package.
+// The purpose of this project is to provide a PoC implementation to make the
+// SecureJoin proposal (https://github.com/golang/go/issues/20126) more
+// tangible.
+package securejoin
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/pkg/errors"
+)
+
+// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been
+// evaluated in attempting to securely join the two given paths.
+var ErrSymlinkLoop = errors.Wrap(syscall.ELOOP, "secure join")
+
+// IsNotExist reports whether err implies that the accessed path (or one of
+// its components) does not exist. It is effectively a broader version of
+// os.IsNotExist.
+func IsNotExist(err error) bool {
+ // If it's a bona fide ENOENT, just bail.
+ if os.IsNotExist(errors.Cause(err)) {
+ return true
+ }
+
+ // Check that it's not actually an ENOTDIR, which in some cases is a more
+ // convoluted case of ENOENT (usually involving weird paths).
+ var errno error
+ switch err := errors.Cause(err).(type) {
+ case *os.PathError:
+ errno = err.Err
+ case *os.LinkError:
+ errno = err.Err
+ case *os.SyscallError:
+ errno = err.Err
+ }
+ return errno == syscall.ENOTDIR || errno == syscall.ENOENT
+}
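+
+// exampleIsNotExist is an illustrative sketch added by the editor (not part of
+// the upstream package): unlike os.IsNotExist, IsNotExist also treats ENOTDIR
+// (e.g. from lstat'ing "a/b" where "a" is a regular file) as "does not exist".
+func exampleIsNotExist(regularFile string) bool {
+ _, err := os.Lstat(filepath.Join(regularFile, "child"))
+ return IsNotExist(err) // true even where os.IsNotExist(err) is false
+}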
+
+// SecureJoinVFS joins the two given path components (similar to Join) except
+// that the returned path is guaranteed to be scoped inside the provided root
+// path (when evaluated). Any symbolic links in the path are evaluated with the
+// given root treated as the root of the filesystem, similar to a chroot. The
+// filesystem state is evaluated through the given VFS interface (if nil, the
+// standard os.* family of functions are used).
+//
+// Note that the guarantees provided by this function only apply if the path
+// components in the returned string are not modified (in other words are not
+// replaced with symlinks on the filesystem) after this function has returned.
+// Such a symlink race is necessarily out-of-scope of SecureJoin.
+func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
+ // Use the os.* VFS implementation if none was specified.
+ if vfs == nil {
+ vfs = osVFS{}
+ }
+
+ var path bytes.Buffer
+ n := 0
+ for unsafePath != "" {
+ if n > 255 {
+ return "", ErrSymlinkLoop
+ }
+
+ // Next path component, p.
+ i := strings.IndexRune(unsafePath, filepath.Separator)
+ var p string
+ if i == -1 {
+ p, unsafePath = unsafePath, ""
+ } else {
+ p, unsafePath = unsafePath[:i], unsafePath[i+1:]
+ }
+
+ // Create a cleaned path, using the lexical semantics of /../a, to
+ // create a "scoped" path component which can safely be joined to fullP
+ // for evaluation. At this point, path.String() doesn't contain any
+ // symlink components.
+ cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p)
+ if cleanP == string(filepath.Separator) {
+ path.Reset()
+ continue
+ }
+ fullP := filepath.Clean(root + cleanP)
+
+ // Figure out whether the path is a symlink.
+ fi, err := vfs.Lstat(fullP)
+ if err != nil && !IsNotExist(err) {
+ return "", err
+ }
+ // Treat non-existent path components the same as non-symlinks (we
+ // can't do any better here).
+ if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 {
+ path.WriteString(p)
+ path.WriteRune(filepath.Separator)
+ continue
+ }
+
+ // Only increment when we actually dereference a link.
+ n++
+
+ // It's a symlink, expand it by prepending it to the yet-unparsed path.
+ dest, err := vfs.Readlink(fullP)
+ if err != nil {
+ return "", err
+ }
+ // Absolute symlinks reset any work we've already done.
+ if filepath.IsAbs(dest) {
+ path.Reset()
+ }
+ unsafePath = dest + string(filepath.Separator) + unsafePath
+ }
+
+ // We have to clean path.String() here because it may contain '..'
+ // components that are entirely lexical, but would be misleading otherwise.
+ // Then do one final clean to ensure that root is also lexically clean.
+ fullP := filepath.Clean(string(filepath.Separator) + path.String())
+ return filepath.Clean(root + fullP), nil
+}
+
+// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library
+// of functions as the VFS. If in doubt, use this function over SecureJoinVFS.
+func SecureJoin(root, unsafePath string) (string, error) {
+ return SecureJoinVFS(root, unsafePath, nil)
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf
new file mode 100644
index 0000000..66bb574
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf
@@ -0,0 +1 @@
+github.com/pkg/errors v0.8.0
diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go
new file mode 100644
index 0000000..a82a5ea
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go
@@ -0,0 +1,41 @@
+// Copyright (C) 2017 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import "os"
+
+// In the future this should be moved into a separate package, since several
+// projects (umoci and go-mtree) already use this sort of interface.
+
+// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is
+// equivalent to using the standard os.* family of functions. This is mainly
+// used for mock testing, but it also allows SecureJoin to be used with any
+// VFS-like system.
+type VFS interface {
+ // Lstat returns a FileInfo describing the named file. If the file is a
+ // symbolic link, the returned FileInfo describes the symbolic link. Lstat
+ // makes no attempt to follow the link. These semantics are identical to
+ // os.Lstat.
+ Lstat(name string) (os.FileInfo, error)
+
+ // Readlink returns the destination of the named symbolic link. These
+ // semantics are identical to os.Readlink.
+ Readlink(name string) (string, error)
+}
+
+// osVFS is the "nil" VFS, in that it just passes everything through to the os
+// module.
+type osVFS struct{}
+
+// Lstat returns a FileInfo describing the named file. If the file is a
+// symbolic link, the returned FileInfo describes the symbolic link. Lstat
+// makes no attempt to follow the link. These semantics are identical to
+// os.Lstat.
+func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
+
+// Readlink returns the destination of the named symbolic link. These
+// semantics are identical to os.Readlink.
+func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) }
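+
+// countingVFS is an illustrative sketch added by the editor (not part of the
+// upstream package): a VFS wrapper that counts lookups, e.g. for tests, while
+// delegating to the standard os implementation.
+type countingVFS struct{ lstats, readlinks int }
+
+func (c *countingVFS) Lstat(name string) (os.FileInfo, error) {
+ c.lstats++
+ return os.Lstat(name)
+}
+
+func (c *countingVFS) Readlink(name string) (string, error) {
+ c.readlinks++
+ return os.Readlink(name)
+}
+
+// It would be passed as the third argument to SecureJoinVFS, e.g.
+// SecureJoinVFS("/some/root", "a/b/c", &countingVFS{}).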
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7929947
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces, which makes the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
+ }
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
+ }
+ panic("reflect.Value read-only flag has changed semantics")
+}
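+
+// exampleUnexportedRead is an illustrative sketch added by the editor (not
+// part of upstream): it shows the effect of unsafeReflectValue, which lets the
+// dumper read an otherwise inaccessible unexported field.
+func exampleUnexportedRead() interface{} {
+ type wrapper struct{ hidden string }
+ v := reflect.ValueOf(wrapper{hidden: "secret"}).Field(0)
+ // v.Interface() would panic here because the field is unexported; the
+ // bypassed value can be read.
+ return unsafeReflectValue(v).Interface() // "secret"
+}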
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..205c28d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<already shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+ // mutate the value, however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+ strings []string // either nil or same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+ // in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Print.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
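+
+// exampleTestConfig is an illustrative sketch added by the editor (not part of
+// upstream): a ConfigState tuned for deterministic, diff-friendly output in
+// tests, used instead of the global Config.
+func exampleTestConfig(v interface{}) string {
+ cs := ConfigState{
+  Indent: "\t",
+  SortKeys: true,
+  DisablePointerAddresses: true,
+  DisableCapacities: true,
+ }
+ return cs.Sdump(v)
+}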
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Print. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+	// is enabled.
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
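+
+When it is called directly, the returned value is simply passed to the
+standard fmt package like any other argument, for example (a minimal sketch):
+
+	fmt.Printf("myVar: %+v\n", spew.NewFormatter(myVar))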
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.
+// It returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
diff --git a/vendor/github.com/docker/spdystream/CONTRIBUTING.md b/vendor/github.com/docker/spdystream/CONTRIBUTING.md
new file mode 100644
index 0000000..d4eddcc
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/CONTRIBUTING.md
@@ -0,0 +1,13 @@
+# Contributing to SpdyStream
+
+Want to hack on spdystream? Awesome! Here are instructions to get you
+started.
+
+SpdyStream is a part of the [Docker](https://docker.io) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read
+[Docker's contribution guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
+
+Happy hacking!
diff --git a/vendor/github.com/docker/spdystream/LICENSE b/vendor/github.com/docker/spdystream/LICENSE
new file mode 100644
index 0000000..9e4bd4d
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014-2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/spdystream/LICENSE.docs b/vendor/github.com/docker/spdystream/LICENSE.docs
new file mode 100644
index 0000000..e26cd4f
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/LICENSE.docs
@@ -0,0 +1,425 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-ShareAlike 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-ShareAlike 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. BY-SA Compatible License means a license listed at
+ creativecommons.org/compatiblelicenses, approved by Creative
+ Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ e. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name
+ of a Creative Commons Public License. The License Elements of this
+ Public License are Attribution and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ i. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ k. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ l. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ m. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. Additional offer from the Licensor -- Adapted Material.
+ Every recipient of Adapted Material from You
+ automatically receives an offer from the Licensor to
+ exercise the Licensed Rights in the Adapted Material
+ under the conditions of the Adapter's License You apply.
+
+ c. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ b. ShareAlike.
+
+ In addition to the conditions in Section 3(a), if You Share
+ Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter's License You apply must be a Creative Commons
+ license with the same License Elements, this version or
+ later, or a BY-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the
+ Adapter's License You apply. You may satisfy this condition
+ in any reasonable manner based on the medium, means, and
+ context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms
+ or conditions on, or apply any Effective Technological
+ Measures to, Adapted Material that restrict exercise of the
+ rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+     Rights (but not its individual contents) is Adapted Material,
+     including for purposes of Section 3(b); and
+
+  c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public licenses.
+Notwithstanding, Creative Commons may elect to apply one of its public
+licenses to material it publishes and in those instances will be
+considered the "Licensor." Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/spdystream/MAINTAINERS b/vendor/github.com/docker/spdystream/MAINTAINERS
new file mode 100644
index 0000000..14e2633
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/MAINTAINERS
@@ -0,0 +1,28 @@
+# Spdystream maintainers file
+#
+# This file describes who runs the docker/spdystream project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+ [Org."Core maintainers"]
+ people = [
+ "dmcgowan",
+ ]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+ # ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+ [people.dmcgowan]
+ Name = "Derek McGowan"
+ Email = "derek@docker.com"
+ GitHub = "dmcgowan"
diff --git a/vendor/github.com/docker/spdystream/README.md b/vendor/github.com/docker/spdystream/README.md
new file mode 100644
index 0000000..11cccd0
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/README.md
@@ -0,0 +1,77 @@
+# SpdyStream
+
+A multiplexed stream library using spdy
+
+## Usage
+
+Client example (connecting to mirroring server without auth)
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/docker/spdystream"
+ "net"
+ "net/http"
+)
+
+func main() {
+ conn, err := net.Dial("tcp", "localhost:8080")
+ if err != nil {
+ panic(err)
+ }
+ spdyConn, err := spdystream.NewConnection(conn, false)
+ if err != nil {
+ panic(err)
+ }
+ go spdyConn.Serve(spdystream.NoOpStreamHandler)
+ stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
+ if err != nil {
+ panic(err)
+ }
+
+ stream.Wait()
+
+ fmt.Fprint(stream, "Writing to stream")
+
+ buf := make([]byte, 25)
+ stream.Read(buf)
+ fmt.Println(string(buf))
+
+ stream.Close()
+}
+```
+
+Server example (mirroring server without auth)
+
+```go
+package main
+
+import (
+ "github.com/docker/spdystream"
+ "net"
+)
+
+func main() {
+ listener, err := net.Listen("tcp", "localhost:8080")
+ if err != nil {
+ panic(err)
+ }
+ for {
+ conn, err := listener.Accept()
+ if err != nil {
+ panic(err)
+ }
+ spdyConn, err := spdystream.NewConnection(conn, true)
+ if err != nil {
+ panic(err)
+ }
+ go spdyConn.Serve(spdystream.MirrorStreamHandler)
+ }
+}
+```
+
+## Copyright and license
+
+Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go
new file mode 100644
index 0000000..6031a0d
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/connection.go
@@ -0,0 +1,958 @@
+package spdystream
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+var (
+ ErrInvalidStreamId = errors.New("Invalid stream id")
+ ErrTimeout = errors.New("Timeout occurred")
+ ErrReset = errors.New("Stream reset")
+ ErrWriteClosedStream = errors.New("Write on closed stream")
+)
+
+const (
+ FRAME_WORKERS = 5
+ QUEUE_SIZE = 50
+)
+
+type StreamHandler func(stream *Stream)
+
+type AuthHandler func(header http.Header, slot uint8, parent uint32) bool
+
+type idleAwareFramer struct {
+ f *spdy.Framer
+ conn *Connection
+ writeLock sync.Mutex
+ resetChan chan struct{}
+ setTimeoutLock sync.Mutex
+ setTimeoutChan chan time.Duration
+ timeout time.Duration
+}
+
+func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer {
+ iaf := &idleAwareFramer{
+ f: framer,
+ resetChan: make(chan struct{}, 2),
+ // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about
+ // the same time the connection is being closed
+ setTimeoutChan: make(chan time.Duration, 1),
+ }
+ return iaf
+}
+
+func (i *idleAwareFramer) monitor() {
+ var (
+ timer *time.Timer
+ expired <-chan time.Time
+ resetChan = i.resetChan
+ setTimeoutChan = i.setTimeoutChan
+ )
+Loop:
+ for {
+ select {
+ case timeout := <-i.setTimeoutChan:
+ i.timeout = timeout
+ if timeout == 0 {
+ if timer != nil {
+ timer.Stop()
+ }
+ } else {
+ if timer == nil {
+ timer = time.NewTimer(timeout)
+ expired = timer.C
+ } else {
+ timer.Reset(timeout)
+ }
+ }
+ case <-resetChan:
+ if timer != nil && i.timeout > 0 {
+ timer.Reset(i.timeout)
+ }
+ case <-expired:
+ i.conn.streamCond.L.Lock()
+ streams := i.conn.streams
+ i.conn.streams = make(map[spdy.StreamId]*Stream)
+ i.conn.streamCond.Broadcast()
+ i.conn.streamCond.L.Unlock()
+ go func() {
+ for _, stream := range streams {
+ stream.resetStream()
+ }
+ i.conn.Close()
+ }()
+ case <-i.conn.closeChan:
+ if timer != nil {
+ timer.Stop()
+ }
+
+ // Start a goroutine to drain resetChan. This is needed because we've seen
+ // some unit tests with large numbers of goroutines get into a situation
+ // where resetChan fills up, at least 1 call to Write() is still trying to
+ // send to resetChan, the connection gets closed, and this case statement
+ // attempts to grab the write lock that Write() already has, causing a
+ // deadlock.
+ //
+ // See https://github.com/docker/spdystream/issues/49 for more details.
+ go func() {
+ for _ = range resetChan {
+ }
+ }()
+
+ go func() {
+ for _ = range setTimeoutChan {
+ }
+ }()
+
+ i.writeLock.Lock()
+ close(resetChan)
+ i.resetChan = nil
+ i.writeLock.Unlock()
+
+ i.setTimeoutLock.Lock()
+ close(i.setTimeoutChan)
+ i.setTimeoutChan = nil
+ i.setTimeoutLock.Unlock()
+
+ break Loop
+ }
+ }
+
+ // Drain resetChan
+ for _ = range resetChan {
+ }
+}
+
+func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error {
+ i.writeLock.Lock()
+ defer i.writeLock.Unlock()
+ if i.resetChan == nil {
+ return io.EOF
+ }
+ err := i.f.WriteFrame(frame)
+ if err != nil {
+ return err
+ }
+
+ i.resetChan <- struct{}{}
+
+ return nil
+}
+
+func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) {
+ frame, err := i.f.ReadFrame()
+ if err != nil {
+ return nil, err
+ }
+
+ // resetChan should never be closed since it is only closed
+ // when the connection has closed its closeChan. This closure
+ // only occurs after all Reads have finished
+ // TODO (dmcgowan): refactor relationship into connection
+ i.resetChan <- struct{}{}
+
+ return frame, nil
+}
+
+func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) {
+ i.setTimeoutLock.Lock()
+ defer i.setTimeoutLock.Unlock()
+
+ if i.setTimeoutChan == nil {
+ return
+ }
+
+ i.setTimeoutChan <- timeout
+}
+
+type Connection struct {
+ conn net.Conn
+ framer *idleAwareFramer
+
+ closeChan chan bool
+ goneAway bool
+ lastStreamChan chan<- *Stream
+ goAwayTimeout time.Duration
+ closeTimeout time.Duration
+
+ streamLock *sync.RWMutex
+ streamCond *sync.Cond
+ streams map[spdy.StreamId]*Stream
+
+ nextIdLock sync.Mutex
+ receiveIdLock sync.Mutex
+ nextStreamId spdy.StreamId
+ receivedStreamId spdy.StreamId
+
+ pingIdLock sync.Mutex
+ pingId uint32
+ pingChans map[uint32]chan error
+
+ shutdownLock sync.Mutex
+ shutdownChan chan error
+ hasShutdown bool
+
+ // for testing https://github.com/docker/spdystream/pull/56
+ dataFrameHandler func(*spdy.DataFrame) error
+}
+
+// NewConnection creates a new spdy connection from an existing
+// network connection.
+func NewConnection(conn net.Conn, server bool) (*Connection, error) {
+ framer, framerErr := spdy.NewFramer(conn, conn)
+ if framerErr != nil {
+ return nil, framerErr
+ }
+ idleAwareFramer := newIdleAwareFramer(framer)
+ var sid spdy.StreamId
+ var rid spdy.StreamId
+ var pid uint32
+ if server {
+ sid = 2
+ rid = 1
+ pid = 2
+ } else {
+ sid = 1
+ rid = 2
+ pid = 1
+ }
+
+ streamLock := new(sync.RWMutex)
+ streamCond := sync.NewCond(streamLock)
+
+ session := &Connection{
+ conn: conn,
+ framer: idleAwareFramer,
+
+ closeChan: make(chan bool),
+ goAwayTimeout: time.Duration(0),
+ closeTimeout: time.Duration(0),
+
+ streamLock: streamLock,
+ streamCond: streamCond,
+ streams: make(map[spdy.StreamId]*Stream),
+ nextStreamId: sid,
+ receivedStreamId: rid,
+
+ pingId: pid,
+ pingChans: make(map[uint32]chan error),
+
+ shutdownChan: make(chan error),
+ }
+ session.dataFrameHandler = session.handleDataFrame
+ idleAwareFramer.conn = session
+ go idleAwareFramer.monitor()
+
+ return session, nil
+}
+
+// Ping sends a ping frame across the connection and
+// returns the response time
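+//
+// A one-line sketch (conn is an existing *Connection):
+//
+//	rtt, err := conn.Ping()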
+func (s *Connection) Ping() (time.Duration, error) {
+ pid := s.pingId
+ s.pingIdLock.Lock()
+ if s.pingId > 0x7ffffffe {
+ s.pingId = s.pingId - 0x7ffffffe
+ } else {
+ s.pingId = s.pingId + 2
+ }
+ s.pingIdLock.Unlock()
+ pingChan := make(chan error)
+ s.pingChans[pid] = pingChan
+ defer delete(s.pingChans, pid)
+
+ frame := &spdy.PingFrame{Id: pid}
+ startTime := time.Now()
+ writeErr := s.framer.WriteFrame(frame)
+ if writeErr != nil {
+ return time.Duration(0), writeErr
+ }
+ select {
+ case <-s.closeChan:
+ return time.Duration(0), errors.New("connection closed")
+ case err, ok := <-pingChan:
+ if ok && err != nil {
+ return time.Duration(0), err
+ }
+ break
+ }
+ return time.Now().Sub(startTime), nil
+}
+
+// Serve handles frames sent by the remote endpoint, including the reply
+// frames needed to fully initiate connections. Both clients and servers
+// should call Serve in a separate goroutine before creating streams.
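+//
+// Typical usage (conn is an existing *Connection):
+//
+//	go conn.Serve(NoOpStreamHandler)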
+func (s *Connection) Serve(newHandler StreamHandler) {
+ // use a WaitGroup to wait for all frames to be drained after receiving
+ // go-away.
+ var wg sync.WaitGroup
+
+ // Partition queues to ensure stream frames are handled
+ // by the same worker, ensuring order is maintained
+ frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS)
+ for i := 0; i < FRAME_WORKERS; i++ {
+ frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE)
+
+ // Ensure frame queue is drained when connection is closed
+ go func(frameQueue *PriorityFrameQueue) {
+ <-s.closeChan
+ frameQueue.Drain()
+ }(frameQueues[i])
+
+ wg.Add(1)
+ go func(frameQueue *PriorityFrameQueue) {
+ // let the WaitGroup know this worker is done
+ defer wg.Done()
+
+ s.frameHandler(frameQueue, newHandler)
+ }(frameQueues[i])
+ }
+
+ var (
+ partitionRoundRobin int
+ goAwayFrame *spdy.GoAwayFrame
+ )
+Loop:
+ for {
+ readFrame, err := s.framer.ReadFrame()
+ if err != nil {
+ if err != io.EOF {
+ fmt.Errorf("frame read error: %s", err)
+ } else {
+ debugMessage("(%p) EOF received", s)
+ }
+ break
+ }
+ var priority uint8
+ var partition int
+ switch frame := readFrame.(type) {
+ case *spdy.SynStreamFrame:
+ if s.checkStreamFrame(frame) {
+ priority = frame.Priority
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId)
+ s.addStreamFrame(frame)
+ } else {
+ debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId)
+ continue
+ }
+ case *spdy.SynReplyFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.DataFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.RstStreamFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.HeadersFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.PingFrame:
+ priority = 0
+ partition = partitionRoundRobin
+ partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+ case *spdy.GoAwayFrame:
+ // hold on to the go away frame and exit the loop
+ goAwayFrame = frame
+ break Loop
+ default:
+ priority = 7
+ partition = partitionRoundRobin
+ partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+ }
+ frameQueues[partition].Push(readFrame, priority)
+ }
+ close(s.closeChan)
+
+ // wait for all frame handler workers to indicate they've drained their queues
+ // before handling the go away frame
+ wg.Wait()
+
+ if goAwayFrame != nil {
+ s.handleGoAwayFrame(goAwayFrame)
+ }
+
+ // now it's safe to close remote channels and empty s.streams
+ s.streamCond.L.Lock()
+ // notify streams that they're now closed, which will
+ // unblock any stream Read() calls
+ for _, stream := range s.streams {
+ stream.closeRemoteChannels()
+ }
+ s.streams = make(map[spdy.StreamId]*Stream)
+ s.streamCond.Broadcast()
+ s.streamCond.L.Unlock()
+}
+
+func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) {
+ for {
+ popFrame := frameQueue.Pop()
+ if popFrame == nil {
+ return
+ }
+
+ var frameErr error
+ switch frame := popFrame.(type) {
+ case *spdy.SynStreamFrame:
+ frameErr = s.handleStreamFrame(frame, newHandler)
+ case *spdy.SynReplyFrame:
+ frameErr = s.handleReplyFrame(frame)
+ case *spdy.DataFrame:
+ frameErr = s.dataFrameHandler(frame)
+ case *spdy.RstStreamFrame:
+ frameErr = s.handleResetFrame(frame)
+ case *spdy.HeadersFrame:
+ frameErr = s.handleHeaderFrame(frame)
+ case *spdy.PingFrame:
+ frameErr = s.handlePingFrame(frame)
+ case *spdy.GoAwayFrame:
+ frameErr = s.handleGoAwayFrame(frame)
+ default:
+ frameErr = fmt.Errorf("unhandled frame type: %T", frame)
+ }
+
+ if frameErr != nil {
+ fmt.Errorf("frame handling error: %s", frameErr)
+ }
+ }
+}
+
+func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 {
+ stream, streamOk := s.getStream(streamId)
+ if !streamOk {
+ return 7
+ }
+ return stream.priority
+}
+
+func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) {
+ var parent *Stream
+ if frame.AssociatedToStreamId != spdy.StreamId(0) {
+ parent, _ = s.getStream(frame.AssociatedToStreamId)
+ }
+
+ stream := &Stream{
+ streamId: frame.StreamId,
+ parent: parent,
+ conn: s,
+ startChan: make(chan error),
+ headers: frame.Headers,
+ finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00,
+ replyCond: sync.NewCond(new(sync.Mutex)),
+ dataChan: make(chan []byte),
+ headerChan: make(chan http.Header),
+ closeChan: make(chan bool),
+ }
+ if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 {
+ stream.closeRemoteChannels()
+ }
+
+ s.addStream(stream)
+}
+
+// checkStreamFrame checks to see if a stream frame is allowed.
+// If the stream id is invalid, a reset frame with a protocol error
+// is sent asynchronously and false is returned.
+func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool {
+ s.receiveIdLock.Lock()
+ defer s.receiveIdLock.Unlock()
+ if s.goneAway {
+ return false
+ }
+ validationErr := s.validateStreamId(frame.StreamId)
+ if validationErr != nil {
+ go func() {
+ resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId)
+ if resetErr != nil {
+ fmt.Errorf("reset error: %s", resetErr)
+ }
+ }()
+ return false
+ }
+ return true
+}
+
+func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error {
+ stream, ok := s.getStream(frame.StreamId)
+ if !ok {
+ return fmt.Errorf("Missing stream: %d", frame.StreamId)
+ }
+
+ newHandler(stream)
+
+ return nil
+}
+
+func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error {
+ debugMessage("(%p) Reply frame received for %d", s, frame.StreamId)
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ debugMessage("Reply frame gone away for %d", frame.StreamId)
+ // Stream has already gone away
+ return nil
+ }
+ if stream.replied {
+ // Stream has already received reply
+ return nil
+ }
+ stream.replied = true
+
+ // TODO Check for error
+ if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
+ s.remoteStreamFinish(stream)
+ }
+
+ close(stream.startChan)
+
+ return nil
+}
+
+func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error {
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ // Stream has already been removed
+ return nil
+ }
+ s.removeStream(stream)
+ stream.closeRemoteChannels()
+
+ if !stream.replied {
+ stream.replied = true
+ stream.startChan <- ErrReset
+ close(stream.startChan)
+ }
+
+ stream.finishLock.Lock()
+ stream.finished = true
+ stream.finishLock.Unlock()
+
+ return nil
+}
+
+func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error {
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ // Stream has already gone away
+ return nil
+ }
+ if !stream.replied {
+ // No reply received...Protocol error?
+ return nil
+ }
+
+ // TODO limit headers while not blocking (use buffered chan or goroutine?)
+ select {
+ case <-stream.closeChan:
+ return nil
+ case stream.headerChan <- frame.Headers:
+ }
+
+ if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
+ s.remoteStreamFinish(stream)
+ }
+
+ return nil
+}
+
+func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
+ debugMessage("(%p) Data frame received for %d", s, frame.StreamId)
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId)
+ // Stream has already gone away
+ return nil
+ }
+ if !stream.replied {
+ debugMessage("(%p) Data frame not replied %d", s, frame.StreamId)
+ // No reply received...Protocol error?
+ return nil
+ }
+
+ debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId)
+ if len(frame.Data) > 0 {
+ stream.dataLock.RLock()
+ select {
+ case <-stream.closeChan:
+ debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId)
+ case stream.dataChan <- frame.Data:
+ debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId)
+ }
+ stream.dataLock.RUnlock()
+ }
+ if (frame.Flags & spdy.DataFlagFin) != 0x00 {
+ s.remoteStreamFinish(stream)
+ }
+ return nil
+}
+
+func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
+ if s.pingId&0x01 != frame.Id&0x01 {
+ return s.framer.WriteFrame(frame)
+ }
+ pingChan, pingOk := s.pingChans[frame.Id]
+ if pingOk {
+ close(pingChan)
+ }
+ return nil
+}
+
+func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error {
+ debugMessage("(%p) Go away received", s)
+ s.receiveIdLock.Lock()
+ if s.goneAway {
+ s.receiveIdLock.Unlock()
+ return nil
+ }
+ s.goneAway = true
+ s.receiveIdLock.Unlock()
+
+ if s.lastStreamChan != nil {
+ stream, _ := s.getStream(frame.LastGoodStreamId)
+ go func() {
+ s.lastStreamChan <- stream
+ }()
+ }
+
+ // Do not block frame handler waiting for closure
+ go s.shutdown(s.goAwayTimeout)
+
+ return nil
+}
+
+func (s *Connection) remoteStreamFinish(stream *Stream) {
+ stream.closeRemoteChannels()
+
+ stream.finishLock.Lock()
+ if stream.finished {
+ // Stream is fully closed, cleanup
+ s.removeStream(stream)
+ }
+ stream.finishLock.Unlock()
+}
+
+// CreateStream creates a new spdy stream using the parameters for
+// creating the stream frame. The stream frame will be sent upon
+// calling this function; however, this function does not wait for
+// the reply frame. If waiting for the reply is desired, use the
+// Wait or WaitTimeout method on the returned stream.
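+//
+// A minimal sketch (error handling elided; conn is an existing *Connection):
+//
+//	stream, err := conn.CreateStream(http.Header{}, nil, false)
+//	if err != nil {
+//		// handle error
+//	}
+//	stream.Wait() // block until the reply frame arrives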
+func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) {
+ // MUST synchronize stream creation (all the way to writing the frame)
+ // as stream IDs **MUST** increase monotonically.
+ s.nextIdLock.Lock()
+ defer s.nextIdLock.Unlock()
+
+ streamId := s.getNextStreamId()
+ if streamId == 0 {
+ return nil, fmt.Errorf("Unable to get new stream id")
+ }
+
+ stream := &Stream{
+ streamId: streamId,
+ parent: parent,
+ conn: s,
+ startChan: make(chan error),
+ headers: headers,
+ dataChan: make(chan []byte),
+ headerChan: make(chan http.Header),
+ closeChan: make(chan bool),
+ }
+
+ debugMessage("(%p) (%p) Create stream", s, stream)
+
+ s.addStream(stream)
+
+ return stream, s.sendStream(stream, fin)
+}
+
+func (s *Connection) shutdown(closeTimeout time.Duration) {
+ // TODO Ensure this isn't called multiple times
+ s.shutdownLock.Lock()
+ if s.hasShutdown {
+ s.shutdownLock.Unlock()
+ return
+ }
+ s.hasShutdown = true
+ s.shutdownLock.Unlock()
+
+ var timeout <-chan time.Time
+ if closeTimeout > time.Duration(0) {
+ timeout = time.After(closeTimeout)
+ }
+ streamsClosed := make(chan bool)
+
+ go func() {
+ s.streamCond.L.Lock()
+ for len(s.streams) > 0 {
+ debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
+ s.streamCond.Wait()
+ }
+ s.streamCond.L.Unlock()
+ close(streamsClosed)
+ }()
+
+ var err error
+ select {
+ case <-streamsClosed:
+ // No active streams, close should be safe
+ err = s.conn.Close()
+ case <-timeout:
+ // Force ungraceful close
+ err = s.conn.Close()
+ // Wait for cleanup to clear active streams
+ <-streamsClosed
+ }
+
+ if err != nil {
+ duration := 10 * time.Minute
+ time.AfterFunc(duration, func() {
+ select {
+ case err, ok := <-s.shutdownChan:
+ if ok {
+ fmt.Errorf("Unhandled close error after %s: %s", duration, err)
+ }
+ default:
+ }
+ })
+ s.shutdownChan <- err
+ }
+ close(s.shutdownChan)
+
+ return
+}
+
+// Close closes the spdy connection by sending a GOAWAY frame and
+// initiating shutdown.
+func (s *Connection) Close() error {
+ s.receiveIdLock.Lock()
+ if s.goneAway {
+ s.receiveIdLock.Unlock()
+ return nil
+ }
+ s.goneAway = true
+ s.receiveIdLock.Unlock()
+
+ var lastStreamId spdy.StreamId
+ if s.receivedStreamId > 2 {
+ lastStreamId = s.receivedStreamId - 2
+ }
+
+ goAwayFrame := &spdy.GoAwayFrame{
+ LastGoodStreamId: lastStreamId,
+ Status: spdy.GoAwayOK,
+ }
+
+ err := s.framer.WriteFrame(goAwayFrame)
+ if err != nil {
+ return err
+ }
+
+ go s.shutdown(s.closeTimeout)
+
+ return nil
+}
+
+// CloseWait closes the connection and waits for shutdown
+// to finish. Note the underlying network Connection
+// is not closed until the end of shutdown.
+func (s *Connection) CloseWait() error {
+ closeErr := s.Close()
+ if closeErr != nil {
+ return closeErr
+ }
+ shutdownErr, ok := <-s.shutdownChan
+ if ok {
+ return shutdownErr
+ }
+ return nil
+}
+
+// Wait waits for the connection to finish shutdown or for
+// the wait timeout duration to expire. This needs to be
+// called either after Close has been called or the GOAWAY frame
+// has been received. If the wait timeout is 0, this function
+// will block until shutdown finishes. If wait is never called
+// and a shutdown error occurs, that error will be logged as an
+// unhandled error.
+func (s *Connection) Wait(waitTimeout time.Duration) error {
+ var timeout <-chan time.Time
+ if waitTimeout > time.Duration(0) {
+ timeout = time.After(waitTimeout)
+ }
+
+ select {
+ case err, ok := <-s.shutdownChan:
+ if ok {
+ return err
+ }
+ case <-timeout:
+ return ErrTimeout
+ }
+ return nil
+}
+
+// NotifyClose registers a channel on which the last stream received
+// by the remote peer is sent when the remote indicates connection
+// closure. The notify timeout determines the duration between the
+// go away frame being received and the connection being closed.
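+//
+// A minimal sketch (the timeout value is arbitrary; conn is an existing *Connection):
+//
+//	lastStream := make(chan *Stream, 1)
+//	conn.NotifyClose(lastStream, 30*time.Second)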
+func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) {
+ s.goAwayTimeout = timeout
+ s.lastStreamChan = c
+}
+
+// SetCloseTimeout sets the amount of time close will wait for
+// streams to finish before terminating the underlying network
+// connection. Setting the timeout to 0 will cause close to
+// wait forever, which is the default.
+func (s *Connection) SetCloseTimeout(timeout time.Duration) {
+ s.closeTimeout = timeout
+}
+
+// SetIdleTimeout sets the amount of time the connection may sit idle before
+// it is forcefully terminated.
+func (s *Connection) SetIdleTimeout(timeout time.Duration) {
+ s.framer.setIdleTimeout(timeout)
+}
+
+func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error {
+ var flags spdy.ControlFlags
+ if fin {
+ flags = spdy.ControlFlagFin
+ }
+
+ headerFrame := &spdy.HeadersFrame{
+ StreamId: stream.streamId,
+ Headers: headers,
+ CFHeader: spdy.ControlFrameHeader{Flags: flags},
+ }
+
+ return s.framer.WriteFrame(headerFrame)
+}
+
+func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error {
+ var flags spdy.ControlFlags
+ if fin {
+ flags = spdy.ControlFlagFin
+ }
+
+ replyFrame := &spdy.SynReplyFrame{
+ StreamId: stream.streamId,
+ Headers: headers,
+ CFHeader: spdy.ControlFrameHeader{Flags: flags},
+ }
+
+ return s.framer.WriteFrame(replyFrame)
+}
+
+func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error {
+ resetFrame := &spdy.RstStreamFrame{
+ StreamId: streamId,
+ Status: status,
+ }
+
+ return s.framer.WriteFrame(resetFrame)
+}
+
+func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error {
+ return s.sendResetFrame(status, stream.streamId)
+}
+
+func (s *Connection) sendStream(stream *Stream, fin bool) error {
+ var flags spdy.ControlFlags
+ if fin {
+ flags = spdy.ControlFlagFin
+ stream.finished = true
+ }
+
+ var parentId spdy.StreamId
+ if stream.parent != nil {
+ parentId = stream.parent.streamId
+ }
+
+ streamFrame := &spdy.SynStreamFrame{
+ StreamId: spdy.StreamId(stream.streamId),
+ AssociatedToStreamId: spdy.StreamId(parentId),
+ Headers: stream.headers,
+ CFHeader: spdy.ControlFrameHeader{Flags: flags},
+ }
+
+ return s.framer.WriteFrame(streamFrame)
+}
+
+// getNextStreamId returns the next sequential id.
+// Every call produces a unique value, or 0 once the id space is exhausted.
+func (s *Connection) getNextStreamId() spdy.StreamId {
+ sid := s.nextStreamId
+ if sid > 0x7fffffff {
+ return 0
+ }
+ s.nextStreamId = s.nextStreamId + 2
+ return sid
+}
+
+// PeekNextStreamId returns the next sequential id and keeps the next id untouched
+func (s *Connection) PeekNextStreamId() spdy.StreamId {
+ sid := s.nextStreamId
+ return sid
+}
+
+func (s *Connection) validateStreamId(rid spdy.StreamId) error {
+ if rid > 0x7fffffff || rid < s.receivedStreamId {
+ return ErrInvalidStreamId
+ }
+ s.receivedStreamId = rid + 2
+ return nil
+}
+
+func (s *Connection) addStream(stream *Stream) {
+ s.streamCond.L.Lock()
+ s.streams[stream.streamId] = stream
+ debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId)
+ s.streamCond.Broadcast()
+ s.streamCond.L.Unlock()
+}
+
+func (s *Connection) removeStream(stream *Stream) {
+ s.streamCond.L.Lock()
+ delete(s.streams, stream.streamId)
+ debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId)
+ s.streamCond.Broadcast()
+ s.streamCond.L.Unlock()
+}
+
+func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
+ s.streamLock.RLock()
+ stream, ok = s.streams[streamId]
+ s.streamLock.RUnlock()
+ return
+}
+
+// FindStream looks up the given stream id and either waits for the
+// stream to be found or returns nil if the stream id is no longer
+// valid.
+func (s *Connection) FindStream(streamId uint32) *Stream {
+ var stream *Stream
+ var ok bool
+ s.streamCond.L.Lock()
+ stream, ok = s.streams[spdy.StreamId(streamId)]
+ debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
+ for !ok && streamId >= uint32(s.receivedStreamId) {
+ s.streamCond.Wait()
+ stream, ok = s.streams[spdy.StreamId(streamId)]
+ }
+ s.streamCond.L.Unlock()
+ return stream
+}
+
+func (s *Connection) CloseChan() <-chan bool {
+ return s.closeChan
+}
diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go
new file mode 100644
index 0000000..b59fa5f
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/handlers.go
@@ -0,0 +1,38 @@
+package spdystream
+
+import (
+ "io"
+ "net/http"
+)
+
+// MirrorStreamHandler mirrors all streams.
+func MirrorStreamHandler(stream *Stream) {
+ replyErr := stream.SendReply(http.Header{}, false)
+ if replyErr != nil {
+ return
+ }
+
+ go func() {
+ io.Copy(stream, stream)
+ stream.Close()
+ }()
+ go func() {
+ for {
+ header, receiveErr := stream.ReceiveHeader()
+ if receiveErr != nil {
+ return
+ }
+ sendErr := stream.SendHeader(header, false)
+ if sendErr != nil {
+ return
+ }
+ }
+ }()
+}
+
+// NoOpStreamHandler does nothing when a stream connects. It is most
+// likely used with RejectAuthHandler, which will not allow any
+// streams to make it to the stream handler.
+func NoOpStreamHandler(stream *Stream) {
+ stream.SendReply(http.Header{}, false)
+}
diff --git a/vendor/github.com/docker/spdystream/priority.go b/vendor/github.com/docker/spdystream/priority.go
new file mode 100644
index 0000000..fc8582b
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/priority.go
@@ -0,0 +1,98 @@
+package spdystream
+
+import (
+ "container/heap"
+ "sync"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+type prioritizedFrame struct {
+ frame spdy.Frame
+ priority uint8
+ insertId uint64
+}
+
+type frameQueue []*prioritizedFrame
+
+func (fq frameQueue) Len() int {
+ return len(fq)
+}
+
+func (fq frameQueue) Less(i, j int) bool {
+ if fq[i].priority == fq[j].priority {
+ return fq[i].insertId < fq[j].insertId
+ }
+ return fq[i].priority < fq[j].priority
+}
+
+func (fq frameQueue) Swap(i, j int) {
+ fq[i], fq[j] = fq[j], fq[i]
+}
+
+func (fq *frameQueue) Push(x interface{}) {
+ *fq = append(*fq, x.(*prioritizedFrame))
+}
+
+func (fq *frameQueue) Pop() interface{} {
+ old := *fq
+ n := len(old)
+ *fq = old[0 : n-1]
+ return old[n-1]
+}
+
+type PriorityFrameQueue struct {
+ queue *frameQueue
+ c *sync.Cond
+ size int
+ nextInsertId uint64
+ drain bool
+}
+
+func NewPriorityFrameQueue(size int) *PriorityFrameQueue {
+ queue := make(frameQueue, 0, size)
+ heap.Init(&queue)
+
+ return &PriorityFrameQueue{
+ queue: &queue,
+ size: size,
+ c: sync.NewCond(&sync.Mutex{}),
+ }
+}
+
+func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) {
+ q.c.L.Lock()
+ defer q.c.L.Unlock()
+ for q.queue.Len() >= q.size {
+ q.c.Wait()
+ }
+ pFrame := &prioritizedFrame{
+ frame: frame,
+ priority: priority,
+ insertId: q.nextInsertId,
+ }
+ q.nextInsertId = q.nextInsertId + 1
+ heap.Push(q.queue, pFrame)
+ q.c.Signal()
+}
+
+func (q *PriorityFrameQueue) Pop() spdy.Frame {
+ q.c.L.Lock()
+ defer q.c.L.Unlock()
+ for q.queue.Len() == 0 {
+ if q.drain {
+ return nil
+ }
+ q.c.Wait()
+ }
+ frame := heap.Pop(q.queue).(*prioritizedFrame).frame
+ q.c.Signal()
+ return frame
+}
+
+func (q *PriorityFrameQueue) Drain() {
+ q.c.L.Lock()
+ defer q.c.L.Unlock()
+ q.drain = true
+ q.c.Broadcast()
+}
diff --git a/vendor/github.com/docker/spdystream/spdy/dictionary.go b/vendor/github.com/docker/spdystream/spdy/dictionary.go
new file mode 100644
index 0000000..5a5ff0e
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/dictionary.go
@@ -0,0 +1,187 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+// headerDictionary is the dictionary sent to the zlib compressor/decompressor.
+var headerDictionary = []byte{
+ 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68,
+ 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70,
+ 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70,
+ 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00,
+ 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00,
+ 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70,
+ 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
+ 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63,
+ 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
+ 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f,
+ 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c,
+ 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00,
+ 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70,
+ 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73,
+ 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00,
+ 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63,
+ 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65,
+ 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
+ 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10,
+ 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d,
+ 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+ 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00,
+ 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00,
+ 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00,
+ 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00,
+ 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00,
+ 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00,
+ 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
+ 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69,
+ 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66,
+ 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68,
+ 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69,
+ 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00,
+ 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f,
+ 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73,
+ 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d,
+ 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00,
+ 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67,
+ 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d,
+ 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69,
+ 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65,
+ 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74,
+ 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65,
+ 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
+ 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72,
+ 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00,
+ 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00,
+ 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
+ 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00,
+ 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05,
+ 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00,
+ 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72,
+ 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72,
+ 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00,
+ 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00,
+ 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c,
+ 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72,
+ 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65,
+ 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00,
+ 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61,
+ 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73,
+ 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74,
+ 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79,
+ 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00,
+ 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69,
+ 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77,
+ 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e,
+ 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00,
+ 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00,
+ 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30,
+ 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00,
+ 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31,
+ 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72,
+ 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62,
+ 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73,
+ 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69,
+ 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65,
+ 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00,
+ 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69,
+ 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32,
+ 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35,
+ 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30,
+ 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33,
+ 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37,
+ 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30,
+ 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34,
+ 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31,
+ 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31,
+ 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34,
+ 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34,
+ 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e,
+ 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65,
+ 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20,
+ 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f,
+ 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d,
+ 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34,
+ 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30,
+ 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30,
+ 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64,
+ 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e,
+ 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64,
+ 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74,
+ 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20,
+ 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
+ 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46,
+ 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41,
+ 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a,
+ 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41,
+ 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20,
+ 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20,
+ 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30,
+ 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e,
+ 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57,
+ 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c,
+ 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61,
+ 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20,
+ 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b,
+ 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f,
+ 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61,
+ 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69,
+ 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67,
+ 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67,
+ 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
+ 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
+ 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c,
+ 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c,
+ 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74,
+ 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74,
+ 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65,
+ 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65,
+ 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64,
+ 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
+ 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63,
+ 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69,
+ 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d,
+ 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a,
+ 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e,
+}
diff --git a/vendor/github.com/docker/spdystream/spdy/read.go b/vendor/github.com/docker/spdystream/spdy/read.go
new file mode 100644
index 0000000..9359a95
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/read.go
@@ -0,0 +1,348 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+import (
+ "compress/zlib"
+ "encoding/binary"
+ "io"
+ "net/http"
+ "strings"
+)
+
+func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error {
+ return f.readSynStreamFrame(h, frame)
+}
+
+func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error {
+ return f.readSynReplyFrame(h, frame)
+}
+
+func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
+ return err
+ }
+ if frame.Status == 0 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ return nil
+}
+
+func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ var numSettings uint32
+ if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil {
+ return err
+ }
+ frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings)
+ for i := uint32(0); i < numSettings; i++ {
+ if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil {
+ return err
+ }
+ frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24)
+ frame.FlagIdValues[i].Id &= 0xffffff
+ if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil {
+ return err
+ }
+ if frame.Id == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ if frame.CFHeader.Flags != 0 {
+ return &Error{InvalidControlFrame, StreamId(frame.Id)}
+ }
+ return nil
+}
+
+func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil {
+ return err
+ }
+ if frame.CFHeader.Flags != 0 {
+ return &Error{InvalidControlFrame, frame.LastGoodStreamId}
+ }
+ if frame.CFHeader.length != 8 {
+ return &Error{InvalidControlFrame, frame.LastGoodStreamId}
+ }
+ if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error {
+ return f.readHeadersFrame(h, frame)
+}
+
+func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ if frame.CFHeader.Flags != 0 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if frame.CFHeader.length != 8 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newControlFrame(frameType ControlFrameType) (controlFrame, error) {
+ ctor, ok := cframeCtor[frameType]
+ if !ok {
+ return nil, &Error{Err: InvalidControlFrame}
+ }
+ return ctor(), nil
+}
+
+var cframeCtor = map[ControlFrameType]func() controlFrame{
+ TypeSynStream: func() controlFrame { return new(SynStreamFrame) },
+ TypeSynReply: func() controlFrame { return new(SynReplyFrame) },
+ TypeRstStream: func() controlFrame { return new(RstStreamFrame) },
+ TypeSettings: func() controlFrame { return new(SettingsFrame) },
+ TypePing: func() controlFrame { return new(PingFrame) },
+ TypeGoAway: func() controlFrame { return new(GoAwayFrame) },
+ TypeHeaders: func() controlFrame { return new(HeadersFrame) },
+ TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) },
+}
+
+func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error {
+ if f.headerDecompressor != nil {
+ f.headerReader.N = payloadSize
+ return nil
+ }
+ f.headerReader = io.LimitedReader{R: f.r, N: payloadSize}
+ decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary))
+ if err != nil {
+ return err
+ }
+ f.headerDecompressor = decompressor
+ return nil
+}
+
+// ReadFrame reads SPDY encoded data and returns a decompressed Frame.
+func (f *Framer) ReadFrame() (Frame, error) {
+ var firstWord uint32
+ if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil {
+ return nil, err
+ }
+ if firstWord&0x80000000 != 0 {
+ frameType := ControlFrameType(firstWord & 0xffff)
+ version := uint16(firstWord >> 16 & 0x7fff)
+ return f.parseControlFrame(version, frameType)
+ }
+ return f.parseDataFrame(StreamId(firstWord & 0x7fffffff))
+}
+
+func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) {
+ var length uint32
+ if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ flags := ControlFlags((length & 0xff000000) >> 24)
+ length &= 0xffffff
+ header := ControlFrameHeader{version, frameType, flags, length}
+ cframe, err := newControlFrame(frameType)
+ if err != nil {
+ return nil, err
+ }
+ if err = cframe.read(header, f); err != nil {
+ return nil, err
+ }
+ return cframe, nil
+}
+
+func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) {
+ var numHeaders uint32
+ if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil {
+ return nil, err
+ }
+ var e error
+ h := make(http.Header, int(numHeaders))
+ for i := 0; i < int(numHeaders); i++ {
+ var length uint32
+ if err := binary.Read(r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ nameBytes := make([]byte, length)
+ if _, err := io.ReadFull(r, nameBytes); err != nil {
+ return nil, err
+ }
+ name := string(nameBytes)
+ if name != strings.ToLower(name) {
+ e = &Error{UnlowercasedHeaderName, streamId}
+ name = strings.ToLower(name)
+ }
+ if h[name] != nil {
+ e = &Error{DuplicateHeaders, streamId}
+ }
+ if err := binary.Read(r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ value := make([]byte, length)
+ if _, err := io.ReadFull(r, value); err != nil {
+ return nil, err
+ }
+ valueList := strings.Split(string(value), headerValueSeparator)
+ for _, v := range valueList {
+ h.Add(name, v)
+ }
+ }
+ if e != nil {
+ return h, e
+ }
+ return h, nil
+}
+
+func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error {
+ frame.CFHeader = h
+ var err error
+ if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil {
+ return err
+ }
+ if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil {
+ return err
+ }
+ frame.Priority >>= 5
+ if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil {
+ return err
+ }
+ reader := f.r
+ if !f.headerCompressionDisabled {
+ err := f.uncorkHeaderDecompressor(int64(h.length - 10))
+ if err != nil {
+ return err
+ }
+ reader = f.headerDecompressor
+ }
+ frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
+ if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
+ err = &Error{WrongCompressedPayloadSize, 0}
+ }
+ if err != nil {
+ return err
+ }
+ for h := range frame.Headers {
+ if invalidReqHeaders[h] {
+ return &Error{InvalidHeaderPresent, frame.StreamId}
+ }
+ }
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ return nil
+}
+
+func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error {
+ frame.CFHeader = h
+ var err error
+ if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ reader := f.r
+ if !f.headerCompressionDisabled {
+ err := f.uncorkHeaderDecompressor(int64(h.length - 4))
+ if err != nil {
+ return err
+ }
+ reader = f.headerDecompressor
+ }
+ frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
+ if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
+ err = &Error{WrongCompressedPayloadSize, 0}
+ }
+ if err != nil {
+ return err
+ }
+ for h := range frame.Headers {
+ if invalidRespHeaders[h] {
+ return &Error{InvalidHeaderPresent, frame.StreamId}
+ }
+ }
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ return nil
+}
+
+func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error {
+ frame.CFHeader = h
+ var err error
+ if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ reader := f.r
+ if !f.headerCompressionDisabled {
+ err := f.uncorkHeaderDecompressor(int64(h.length - 4))
+ if err != nil {
+ return err
+ }
+ reader = f.headerDecompressor
+ }
+ frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
+ if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
+ err = &Error{WrongCompressedPayloadSize, 0}
+ }
+ if err != nil {
+ return err
+ }
+ var invalidHeaders map[string]bool
+ if frame.StreamId%2 == 0 {
+ invalidHeaders = invalidReqHeaders
+ } else {
+ invalidHeaders = invalidRespHeaders
+ }
+ for h := range frame.Headers {
+ if invalidHeaders[h] {
+ return &Error{InvalidHeaderPresent, frame.StreamId}
+ }
+ }
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ return nil
+}
+
+func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) {
+ var length uint32
+ if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ var frame DataFrame
+ frame.StreamId = streamId
+ frame.Flags = DataFlags(length >> 24)
+ length &= 0xffffff
+ frame.Data = make([]byte, length)
+ if _, err := io.ReadFull(f.r, frame.Data); err != nil {
+ return nil, err
+ }
+ if frame.StreamId == 0 {
+ return nil, &Error{ZeroStreamId, 0}
+ }
+ return &frame, nil
+}
diff --git a/vendor/github.com/docker/spdystream/spdy/types.go b/vendor/github.com/docker/spdystream/spdy/types.go
new file mode 100644
index 0000000..7b6ee9c
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/types.go
@@ -0,0 +1,275 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package spdy implements the SPDY protocol (currently SPDY/3), described in
+// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3.
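+//
+// A minimal sketch of reading frames from an existing connection
+// (rw is any value usable as both reader and writer, e.g. a net.Conn;
+// error handling elided):
+//
+//	framer, err := spdy.NewFramer(rw, rw)
+//	frame, err := framer.ReadFrame()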
+package spdy
+
+import (
+ "bytes"
+ "compress/zlib"
+ "io"
+ "net/http"
+)
+
+// Version is the protocol version number that this package implements.
+const Version = 3
+
+// ControlFrameType stores the type field in a control frame header.
+type ControlFrameType uint16
+
+const (
+ TypeSynStream ControlFrameType = 0x0001
+ TypeSynReply = 0x0002
+ TypeRstStream = 0x0003
+ TypeSettings = 0x0004
+ TypePing = 0x0006
+ TypeGoAway = 0x0007
+ TypeHeaders = 0x0008
+ TypeWindowUpdate = 0x0009
+)
+
+// ControlFlags are the flags that can be set on a control frame.
+type ControlFlags uint8
+
+const (
+ ControlFlagFin ControlFlags = 0x01
+ ControlFlagUnidirectional = 0x02
+ ControlFlagSettingsClearSettings = 0x01
+)
+
+// DataFlags are the flags that can be set on a data frame.
+type DataFlags uint8
+
+const (
+ DataFlagFin DataFlags = 0x01
+)
+
+// MaxDataLength is the maximum number of bytes that can be stored in one frame.
+const MaxDataLength = 1<<24 - 1
+
+// headerValueSeparator separates multiple header values.
+const headerValueSeparator = "\x00"
+
+// Frame is a single SPDY frame in its unpacked in-memory representation. Use
+// Framer to read and write it.
+type Frame interface {
+ write(f *Framer) error
+}
+
+// ControlFrameHeader contains all the fields in a control frame header,
+// in its unpacked in-memory representation.
+type ControlFrameHeader struct {
+ // Note, high bit is the "Control" bit.
+ version uint16 // spdy version number
+ frameType ControlFrameType
+ Flags ControlFlags
+ length uint32 // length of data field
+}
+
+type controlFrame interface {
+ Frame
+ read(h ControlFrameHeader, f *Framer) error
+}
+
+// StreamId represents a 31-bit value identifying the stream.
+type StreamId uint32
+
+// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
+// frame.
+type SynStreamFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to
+ Priority uint8 // priority of this frame (3-bit)
+ Slot uint8 // index in the server's credential vector of the client certificate
+ Headers http.Header
+}
+
+// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
+type SynReplyFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ Headers http.Header
+}
+
+// RstStreamStatus represents the status that led to a RST_STREAM.
+type RstStreamStatus uint32
+
+const (
+ ProtocolError RstStreamStatus = iota + 1
+ InvalidStream
+ RefusedStream
+ UnsupportedVersion
+ Cancel
+ InternalError
+ FlowControlError
+ StreamInUse
+ StreamAlreadyClosed
+ InvalidCredentials
+ FrameTooLarge
+)
+
+// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
+// frame.
+type RstStreamFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ Status RstStreamStatus
+}
+
+// SettingsFlag represents a flag in a SETTINGS frame.
+type SettingsFlag uint8
+
+const (
+ FlagSettingsPersistValue SettingsFlag = 0x1
+ FlagSettingsPersisted = 0x2
+)
+
+// SettingsId represents the id of an id/value pair in a SETTINGS frame.
+type SettingsId uint32
+
+const (
+ SettingsUploadBandwidth SettingsId = iota + 1
+ SettingsDownloadBandwidth
+ SettingsRoundTripTime
+ SettingsMaxConcurrentStreams
+ SettingsCurrentCwnd
+ SettingsDownloadRetransRate
+ SettingsInitialWindowSize
+ SettingsClientCretificateVectorSize
+)
+
+// SettingsFlagIdValue is the unpacked, in-memory representation of the
+// combined flag/id/value for a setting in a SETTINGS frame.
+type SettingsFlagIdValue struct {
+ Flag SettingsFlag
+ Id SettingsId
+ Value uint32
+}
+
+// SettingsFrame is the unpacked, in-memory representation of a SPDY
+// SETTINGS frame.
+type SettingsFrame struct {
+ CFHeader ControlFrameHeader
+ FlagIdValues []SettingsFlagIdValue
+}
+
+// PingFrame is the unpacked, in-memory representation of a PING frame.
+type PingFrame struct {
+ CFHeader ControlFrameHeader
+ Id uint32 // unique id for this ping, from server is even, from client is odd.
+}
+
+// GoAwayStatus represents the status in a GoAwayFrame.
+type GoAwayStatus uint32
+
+const (
+ GoAwayOK GoAwayStatus = iota
+ GoAwayProtocolError
+ GoAwayInternalError
+)
+
+// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
+type GoAwayFrame struct {
+ CFHeader ControlFrameHeader
+ LastGoodStreamId StreamId // last stream id which was accepted by sender
+ Status GoAwayStatus
+}
+
+// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
+type HeadersFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ Headers http.Header
+}
+
+// WindowUpdateFrame is the unpacked, in-memory representation of a
+// WINDOW_UPDATE frame.
+type WindowUpdateFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ DeltaWindowSize uint32 // additional number of bytes to existing window size
+}
+
+// TODO: Implement credential frame and related methods.
+
+// DataFrame is the unpacked, in-memory representation of a DATA frame.
+type DataFrame struct {
+ // Note, high bit is the "Control" bit. Should be 0 for data frames.
+ StreamId StreamId
+ Flags DataFlags
+ Data []byte // payload data of this frame
+}
+
+// ErrorCode is a SPDY-specific error code.
+type ErrorCode string
+
+const (
+ UnlowercasedHeaderName ErrorCode = "header was not lowercased"
+ DuplicateHeaders = "multiple headers with same name"
+ WrongCompressedPayloadSize = "compressed payload size was incorrect"
+ UnknownFrameType = "unknown frame type"
+ InvalidControlFrame = "invalid control frame"
+ InvalidDataFrame = "invalid data frame"
+ InvalidHeaderPresent = "frame contained invalid header"
+ ZeroStreamId = "stream id zero is disallowed"
+)
+
+// Error contains both the type of error and additional values. StreamId is 0
+// if Error is not associated with a stream.
+type Error struct {
+ Err ErrorCode
+ StreamId StreamId
+}
+
+func (e *Error) Error() string {
+ return string(e.Err)
+}
+
+var invalidReqHeaders = map[string]bool{
+ "Connection": true,
+ "Host": true,
+ "Keep-Alive": true,
+ "Proxy-Connection": true,
+ "Transfer-Encoding": true,
+}
+
+var invalidRespHeaders = map[string]bool{
+ "Connection": true,
+ "Keep-Alive": true,
+ "Proxy-Connection": true,
+ "Transfer-Encoding": true,
+}
+
+// Framer handles serializing/deserializing SPDY frames, including compressing/
+// decompressing payloads.
+type Framer struct {
+ headerCompressionDisabled bool
+ w io.Writer
+ headerBuf *bytes.Buffer
+ headerCompressor *zlib.Writer
+ r io.Reader
+ headerReader io.LimitedReader
+ headerDecompressor io.ReadCloser
+}
+
+// NewFramer allocates a new Framer for a given SPDY connection, represented by
+// an io.Writer and an io.Reader. Note that Framer will read and write individual fields
+// from/to the Reader and Writer, so the caller should pass in an appropriately
+// buffered implementation to optimize performance.
+func NewFramer(w io.Writer, r io.Reader) (*Framer, error) {
+ compressBuf := new(bytes.Buffer)
+ compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary))
+ if err != nil {
+ return nil, err
+ }
+ framer := &Framer{
+ w: w,
+ headerBuf: compressBuf,
+ headerCompressor: compressor,
+ r: r,
+ }
+ return framer, nil
+}
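+
+// Illustrative sketch (assuming a net.Conn named conn): as recommended above,
+// wrap the connection in buffered readers/writers before constructing the Framer.
+//
+//	br := bufio.NewReader(conn)
+//	bw := bufio.NewWriter(conn)
+//	framer, err := spdy.NewFramer(bw, br)
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := framer.WriteFrame(&spdy.PingFrame{Id: 1}); err != nil {
+//		// handle error
+//	}
+//	_ = bw.Flush() // flush any buffered frame bytes to the connection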
diff --git a/vendor/github.com/docker/spdystream/spdy/write.go b/vendor/github.com/docker/spdystream/spdy/write.go
new file mode 100644
index 0000000..b212f66
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/write.go
@@ -0,0 +1,318 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+import (
+ "encoding/binary"
+ "io"
+ "net/http"
+ "strings"
+)
+
+func (frame *SynStreamFrame) write(f *Framer) error {
+ return f.writeSynStreamFrame(frame)
+}
+
+func (frame *SynReplyFrame) write(f *Framer) error {
+ return f.writeSynReplyFrame(frame)
+}
+
+func (frame *RstStreamFrame) write(f *Framer) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeRstStream
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 8
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if frame.Status == 0 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
+ return
+ }
+ return
+}
+
+func (frame *SettingsFrame) write(f *Framer) (err error) {
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeSettings
+ frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil {
+ return
+ }
+ for _, flagIdValue := range frame.FlagIdValues {
+ flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id)
+ if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (frame *PingFrame) write(f *Framer) (err error) {
+ if frame.Id == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypePing
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 4
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil {
+ return
+ }
+ return
+}
+
+func (frame *GoAwayFrame) write(f *Framer) (err error) {
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeGoAway
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 8
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
+ return
+ }
+ return nil
+}
+
+func (frame *HeadersFrame) write(f *Framer) error {
+ return f.writeHeadersFrame(frame)
+}
+
+func (frame *WindowUpdateFrame) write(f *Framer) (err error) {
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeWindowUpdate
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 8
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil {
+ return
+ }
+ return nil
+}
+
+func (frame *DataFrame) write(f *Framer) error {
+ return f.writeDataFrame(frame)
+}
+
+// WriteFrame writes a frame.
+func (f *Framer) WriteFrame(frame Frame) error {
+ return frame.write(f)
+}
+
+func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error {
+ if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil {
+ return err
+ }
+ if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil {
+ return err
+ }
+ flagsAndLength := uint32(h.Flags)<<24 | h.length
+ if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil {
+ return err
+ }
+ return nil
+}
+
+func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) {
+ n = 0
+ if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil {
+ return
+ }
+ n += 2
+ for name, values := range h {
+ if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil {
+ return
+ }
+ n += 2
+ name = strings.ToLower(name)
+ if _, err = io.WriteString(w, name); err != nil {
+ return
+ }
+ n += len(name)
+ v := strings.Join(values, headerValueSeparator)
+ if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil {
+ return
+ }
+ n += 2
+ if _, err = io.WriteString(w, v); err != nil {
+ return
+ }
+ n += len(v)
+ }
+ return
+}
+
+func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ // Marshal the headers.
+ var writer io.Writer = f.headerBuf
+ if !f.headerCompressionDisabled {
+ writer = f.headerCompressor
+ }
+ if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+ return
+ }
+ if !f.headerCompressionDisabled {
+ f.headerCompressor.Flush()
+ }
+
+ // Set ControlFrameHeader.
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeSynStream
+ frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil {
+ return err
+ }
+ if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+ return err
+ }
+ f.headerBuf.Reset()
+ return nil
+}
+
+func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ // Marshal the headers.
+ var writer io.Writer = f.headerBuf
+ if !f.headerCompressionDisabled {
+ writer = f.headerCompressor
+ }
+ if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+ return
+ }
+ if !f.headerCompressionDisabled {
+ f.headerCompressor.Flush()
+ }
+
+ // Set ControlFrameHeader.
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeSynReply
+ frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+ return
+ }
+ f.headerBuf.Reset()
+ return
+}
+
+func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ // Marshal the headers.
+ var writer io.Writer = f.headerBuf
+ if !f.headerCompressionDisabled {
+ writer = f.headerCompressor
+ }
+ if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+ return
+ }
+ if !f.headerCompressionDisabled {
+ f.headerCompressor.Flush()
+ }
+
+ // Set ControlFrameHeader.
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeHeaders
+ frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+ return
+ }
+ f.headerBuf.Reset()
+ return
+}
+
+func (f *Framer) writeDataFrame(frame *DataFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength {
+ return &Error{InvalidDataFrame, frame.StreamId}
+ }
+
+ // Serialize frame to Writer.
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data))
+ if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil {
+ return
+ }
+ if _, err = f.w.Write(frame.Data); err != nil {
+ return
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/spdystream/stream.go b/vendor/github.com/docker/spdystream/stream.go
new file mode 100644
index 0000000..f9e9ee2
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/stream.go
@@ -0,0 +1,327 @@
+package spdystream
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+var (
+ ErrUnreadPartialData = errors.New("unread partial data")
+)
+
+type Stream struct {
+ streamId spdy.StreamId
+ parent *Stream
+ conn *Connection
+ startChan chan error
+
+ dataLock sync.RWMutex
+ dataChan chan []byte
+ unread []byte
+
+ priority uint8
+ headers http.Header
+ headerChan chan http.Header
+ finishLock sync.Mutex
+ finished bool
+ replyCond *sync.Cond
+ replied bool
+ closeLock sync.Mutex
+ closeChan chan bool
+}
+
+// WriteData writes data to stream, sending a dataframe per call
+func (s *Stream) WriteData(data []byte, fin bool) error {
+ s.waitWriteReply()
+ var flags spdy.DataFlags
+
+ if fin {
+ flags = spdy.DataFlagFin
+ s.finishLock.Lock()
+ if s.finished {
+ s.finishLock.Unlock()
+ return ErrWriteClosedStream
+ }
+ s.finished = true
+ s.finishLock.Unlock()
+ }
+
+ dataFrame := &spdy.DataFrame{
+ StreamId: s.streamId,
+ Flags: flags,
+ Data: data,
+ }
+
+ debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
+ return s.conn.framer.WriteFrame(dataFrame)
+}
+
+// Write writes bytes to a stream, calling WriteData for each call.
+func (s *Stream) Write(data []byte) (n int, err error) {
+ err = s.WriteData(data, false)
+ if err == nil {
+ n = len(data)
+ }
+ return
+}
+
+// Read reads bytes from a stream. A single read will never return more
+// than what is sent in a single data frame, but multiple calls to
+// Read may get data from the same data frame.
+func (s *Stream) Read(p []byte) (n int, err error) {
+ if s.unread == nil {
+ select {
+ case <-s.closeChan:
+ return 0, io.EOF
+ case read, ok := <-s.dataChan:
+ if !ok {
+ return 0, io.EOF
+ }
+ s.unread = read
+ }
+ }
+ n = copy(p, s.unread)
+ if n < len(s.unread) {
+ s.unread = s.unread[n:]
+ } else {
+ s.unread = nil
+ }
+ return
+}
+
+// ReadData reads an entire data frame and returns the byte array
+// from the data frame. If there is unread data from the result
+// of a Read call, this function will return an ErrUnreadPartialData.
+func (s *Stream) ReadData() ([]byte, error) {
+ debugMessage("(%p) Reading data from %d", s, s.streamId)
+ if s.unread != nil {
+ return nil, ErrUnreadPartialData
+ }
+ select {
+ case <-s.closeChan:
+ return nil, io.EOF
+ case read, ok := <-s.dataChan:
+ if !ok {
+ return nil, io.EOF
+ }
+ return read, nil
+ }
+}
+
+func (s *Stream) waitWriteReply() {
+ if s.replyCond != nil {
+ s.replyCond.L.Lock()
+ for !s.replied {
+ s.replyCond.Wait()
+ }
+ s.replyCond.L.Unlock()
+ }
+}
+
+// Wait waits for the stream to receive a reply.
+func (s *Stream) Wait() error {
+ return s.WaitTimeout(time.Duration(0))
+}
+
+// WaitTimeout waits for the stream to receive a reply or for timeout.
+// When the timeout is reached, ErrTimeout will be returned.
+func (s *Stream) WaitTimeout(timeout time.Duration) error {
+ var timeoutChan <-chan time.Time
+ if timeout > time.Duration(0) {
+ timeoutChan = time.After(timeout)
+ }
+
+ select {
+ case err := <-s.startChan:
+ if err != nil {
+ return err
+ }
+ break
+ case <-timeoutChan:
+ return ErrTimeout
+ }
+ return nil
+}
+
+// Close closes the stream by sending an empty data frame with the
+// finish flag set, indicating this side is finished with the stream.
+func (s *Stream) Close() error {
+ select {
+ case <-s.closeChan:
+ // Stream is now fully closed
+ s.conn.removeStream(s)
+ default:
+ break
+ }
+ return s.WriteData([]byte{}, true)
+}
+
+// Reset sends a reset frame, putting the stream into the fully closed state.
+func (s *Stream) Reset() error {
+ s.conn.removeStream(s)
+ return s.resetStream()
+}
+
+func (s *Stream) resetStream() error {
+ // Always call closeRemoteChannels, even if s.finished is already true.
+ // This makes it so that stream.Close() followed by stream.Reset() allows
+ // stream.Read() to unblock.
+ s.closeRemoteChannels()
+
+ s.finishLock.Lock()
+ if s.finished {
+ s.finishLock.Unlock()
+ return nil
+ }
+ s.finished = true
+ s.finishLock.Unlock()
+
+ resetFrame := &spdy.RstStreamFrame{
+ StreamId: s.streamId,
+ Status: spdy.Cancel,
+ }
+ return s.conn.framer.WriteFrame(resetFrame)
+}
+
+// CreateSubStream creates a stream using the current as the parent
+func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {
+ return s.conn.CreateStream(headers, s, fin)
+}
+
+// SetPriority sets the stream priority; it does not affect the
+// remote priority of this stream after Open has been called.
+// Valid values are 0 through 7, 0 being the highest priority
+// and 7 the lowest.
+func (s *Stream) SetPriority(priority uint8) {
+ s.priority = priority
+}
+
+// SendHeader sends a header frame across the stream
+func (s *Stream) SendHeader(headers http.Header, fin bool) error {
+ return s.conn.sendHeaders(headers, s, fin)
+}
+
+// SendReply sends a reply on a stream, only valid to be called once
+// when handling a new stream
+func (s *Stream) SendReply(headers http.Header, fin bool) error {
+ if s.replyCond == nil {
+ return errors.New("cannot reply on initiated stream")
+ }
+ s.replyCond.L.Lock()
+ defer s.replyCond.L.Unlock()
+ if s.replied {
+ return nil
+ }
+
+ err := s.conn.sendReply(headers, s, fin)
+ if err != nil {
+ return err
+ }
+
+ s.replied = true
+ s.replyCond.Broadcast()
+ return nil
+}
+
+// Refuse sends a reset frame with the status refuse, only
+// valid to be called once when handling a new stream. This
+// may be used to indicate that a stream is not allowed
+// when http status codes are not being used.
+func (s *Stream) Refuse() error {
+ if s.replied {
+ return nil
+ }
+ s.replied = true
+ return s.conn.sendReset(spdy.RefusedStream, s)
+}
+
+// Cancel sends a reset frame with the status canceled. This
+// can be used at any time by the creator of the Stream to
+// indicate the stream is no longer needed.
+func (s *Stream) Cancel() error {
+ return s.conn.sendReset(spdy.Cancel, s)
+}
+
+// ReceiveHeader receives a header sent on the other side
+// of the stream. This function will block until a header
+// is received or stream is closed.
+func (s *Stream) ReceiveHeader() (http.Header, error) {
+ select {
+ case <-s.closeChan:
+ break
+ case header, ok := <-s.headerChan:
+ if !ok {
+ return nil, fmt.Errorf("header chan closed")
+ }
+ return header, nil
+ }
+ return nil, fmt.Errorf("stream closed")
+}
+
+// Parent returns the parent stream
+func (s *Stream) Parent() *Stream {
+ return s.parent
+}
+
+// Headers returns the headers used to create the stream
+func (s *Stream) Headers() http.Header {
+ return s.headers
+}
+
+// String returns the string version of stream using the
+// streamId to uniquely identify the stream
+func (s *Stream) String() string {
+ return fmt.Sprintf("stream:%d", s.streamId)
+}
+
+// Identifier returns a 32 bit identifier for the stream
+func (s *Stream) Identifier() uint32 {
+ return uint32(s.streamId)
+}
+
+// IsFinished returns whether the stream has finished
+// sending data
+func (s *Stream) IsFinished() bool {
+ return s.finished
+}
+
+// Implement net.Conn interface
+
+func (s *Stream) LocalAddr() net.Addr {
+ return s.conn.conn.LocalAddr()
+}
+
+func (s *Stream) RemoteAddr() net.Addr {
+ return s.conn.conn.RemoteAddr()
+}
+
+// TODO set per stream values instead of connection-wide
+
+func (s *Stream) SetDeadline(t time.Time) error {
+ return s.conn.conn.SetDeadline(t)
+}
+
+func (s *Stream) SetReadDeadline(t time.Time) error {
+ return s.conn.conn.SetReadDeadline(t)
+}
+
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+ return s.conn.conn.SetWriteDeadline(t)
+}
+
+func (s *Stream) closeRemoteChannels() {
+ s.closeLock.Lock()
+ defer s.closeLock.Unlock()
+ select {
+ case <-s.closeChan:
+ default:
+ close(s.closeChan)
+ }
+}
diff --git a/vendor/github.com/docker/spdystream/utils.go b/vendor/github.com/docker/spdystream/utils.go
new file mode 100644
index 0000000..1b2c199
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/utils.go
@@ -0,0 +1,16 @@
+package spdystream
+
+import (
+ "log"
+ "os"
+)
+
+var (
+ DEBUG = os.Getenv("DEBUG")
+)
+
+func debugMessage(fmt string, args ...interface{}) {
+ if DEBUG != "" {
+ log.Printf(fmt, args...)
+ }
+}
diff --git a/vendor/github.com/emicklei/go-restful/.gitignore b/vendor/github.com/emicklei/go-restful/.gitignore
new file mode 100644
index 0000000..cece7be
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/.gitignore
@@ -0,0 +1,70 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+restful.html
+
+*.out
+
+tmp.prof
+
+go-restful.test
+
+examples/restful-basic-authentication
+
+examples/restful-encoding-filter
+
+examples/restful-filters
+
+examples/restful-hello-world
+
+examples/restful-resource-functions
+
+examples/restful-serve-static
+
+examples/restful-user-service
+
+*.DS_Store
+examples/restful-user-resource
+
+examples/restful-multi-containers
+
+examples/restful-form-handling
+
+examples/restful-CORS-filter
+
+examples/restful-options-filter
+
+examples/restful-curly-router
+
+examples/restful-cpuprofiler-service
+
+examples/restful-pre-post-filters
+
+curly.prof
+
+examples/restful-NCSA-logging
+
+examples/restful-html-template
+
+s.html
+restful-path-tail
diff --git a/vendor/github.com/emicklei/go-restful/.travis.yml b/vendor/github.com/emicklei/go-restful/.travis.yml
new file mode 100644
index 0000000..b22f8f5
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/.travis.yml
@@ -0,0 +1,6 @@
+language: go
+
+go:
+ - 1.x
+
+script: go test -v
\ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md
new file mode 100644
index 0000000..e525296
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/CHANGES.md
@@ -0,0 +1,273 @@
+## Change history of go-restful
+
+
+v2.9.5
+- fix panic in Response.WriteError if err == nil
+
+v2.9.4
+
+- fix issue #400 , parsing mime type quality
+- Route Builder added option for contentEncodingEnabled (#398)
+
+v2.9.3
+
+- Avoid return of 415 Unsupported Media Type when request body is empty (#396)
+
+v2.9.2
+
+- Reduce allocations in per-request methods to improve performance (#395)
+
+v2.9.1
+
+- Fix issue with default responses and invalid status code 0. (#393)
+
+v2.9.0
+
+- add per Route content encoding setting (overrides container setting)
+
+v2.8.0
+
+- add Request.QueryParameters()
+- add json-iterator (via build tag)
+- disable vgo module (until log is moved)
+
+v2.7.1
+
+- add vgo module
+
+v2.6.1
+
+- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+)
+
+v2.6.0
+
+- Make JSR 311 routing and path param processing consistent
+- Adding description to RouteBuilder.Reads()
+- Update example for Swagger12 and OpenAPI
+
+2017-09-13
+
+- added route condition functions using `.If(func)` in route building.
+
+2017-02-16
+
+- solved issue #304, make operation names unique
+
+2017-01-30
+
+ [IMPORTANT] For swagger users, change your import statement to:
+ swagger "github.com/emicklei/go-restful-swagger12"
+
+- moved swagger 1.2 code to go-restful-swagger12
+- created TAG 2.0.0
+
+2017-01-27
+
+- remove defer request body close
+- expose Dispatch for testing filters and Route functions
+- swagger response model cannot be array
+- created TAG 1.0.0
+
+2016-12-22
+
+- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
+
+2016-11-26
+
+- Default change! now use CurlyRouter (was RouterJSR311)
+- Default change! no more caching of request content
+- Default change! do not recover from panics
+
+2016-09-22
+
+- fix the DefaultRequestContentType feature
+
+2016-02-14
+
+- take the quality factor of the Accept header media type into account when deciding the content type of the response
+- add constructors for custom entity accessors for xml and json
+
+2015-09-27
+
+- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
+
+2015-09-25
+
+- fixed problem with changing Header after WriteHeader (issue 235)
+
+2015-09-14
+
+- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
+- added support for custom EntityReaderWriters.
+
+2015-08-06
+
+- add support for reading entities from compressed request content
+- use sync.Pool for compressors of http response and request body
+- add Description to Parameter for documentation in Swagger UI
+
+2015-03-20
+
+- add configurable logging
+
+2015-03-18
+
+- if not specified, the Operation is derived from the Route function
+
+2015-03-17
+
+- expose Parameter creation functions
+- make trace logger an interface
+- fix OPTIONSFilter
+- customize rendering of ServiceError
+- JSR311 router now handles wildcards
+- add Notes to Route
+
+2014-11-27
+
+- (api add) PrettyPrint per response. (as proposed in #167)
+
+2014-11-12
+
+- (api add) ApiVersion(.) for documentation in Swagger UI
+
+2014-11-10
+
+- (api change) struct fields tagged with "description" show up in Swagger UI
+
+2014-10-31
+
+- (api change) ReturnsError -> Returns
+- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
+- fix swagger nested structs
+- sort Swagger response messages by code
+
+2014-10-23
+
+- (api add) ReturnsError allows you to document Http codes in swagger
+- fixed problem with greedy CurlyRouter
+- (api add) Access-Control-Max-Age in CORS
+- add tracing functionality (injectable) for debugging purposes
+- support JSON parse 64bit int
+- fix empty parameters for swagger
+- WebServicesUrl is now optional for swagger
+- fixed duplicate AccessControlAllowOrigin in CORS
+- (api change) expose ServeMux in container
+- (api add) added AllowedDomains in CORS
+- (api add) ParameterNamed for detailed documentation
+
+2014-04-16
+
+- (api add) expose constructor of Request for testing.
+
+2014-06-27
+
+- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
+- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
+
+2014-07-03
+
+- (api add) CORS can be configured with a list of allowed domains
+
+2014-03-12
+
+- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
+
+2014-02-26
+
+- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
+
+2014-02-17
+
+- (api change) renamed parameter constants (go-lint checks)
+
+2014-01-10
+
+- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
+
+2014-01-07
+
+- (api change) Write* methods in Response now return the error or nil.
+- added example of serving HTML from a Go template.
+- fixed comparing Allowed headers in CORS (is now case-insensitive)
+
+2013-11-13
+
+- (api add) Response knows how many bytes are written to the response body.
+
+2013-10-29
+
+- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
+
+2013-10-04
+
+- (api add) Response knows what HTTP status has been written
+- (api add) Request can have attributes (map of string->interface, also called request-scoped variables)
+
+2013-09-12
+
+- (api change) Router interface simplified
+- Implemented CurlyRouter, a Router that does not use or allow regular expressions in paths
+
+2013-08-05
+
+- add OPTIONS support
+- add CORS support
+
+2013-08-27
+
+- fixed some reported issues (see github)
+- (api change) deprecated use of WriteError; use WriteErrorString instead
+
+2014-04-15
+
+- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
+
+2013-08-08
+
+- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
+- (api add) the swagger package has been extended to have a UI per container.
+- if panic is detected then a small stack trace is printed (thanks to runner-mei)
+- (api add) WriteErrorString to Response
+
+Important API changes:
+
+- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
+- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
+
+
+2013-07-06
+
+- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
+
+2013-06-19
+
+- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
+
+2013-06-03
+
+- (api change) removed Dispatcher interface, hide PathExpression
+- changed receiver names of type functions to be more idiomatic Go
+
+2013-06-02
+
+- (optimize) Cache the RegExp compilation of Paths.
+
+2013-05-22
+
+- (api add) Added support for request/response filter functions
+
+2013-05-18
+
+
+- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
+- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
+
+[2012-11-14 .. 2013-05-18>
+
+- See https://github.com/emicklei/go-restful/commits
+
+2012-11-14
+
+- Initial commit
+
+
diff --git a/vendor/github.com/emicklei/go-restful/LICENSE b/vendor/github.com/emicklei/go-restful/LICENSE
new file mode 100644
index 0000000..ece7ec6
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2012,2013 Ernest Micklei
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/Makefile b/vendor/github.com/emicklei/go-restful/Makefile
new file mode 100644
index 0000000..b40081c
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/Makefile
@@ -0,0 +1,7 @@
+all: test
+
+test:
+ go test -v .
+
+ex:
+ cd examples && ls *.go | xargs go build -o /tmp/ignore
\ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md
new file mode 100644
index 0000000..f52c25a
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/README.md
@@ -0,0 +1,88 @@
+go-restful
+==========
+package for building REST-style Web Services using Google Go
+
+[Build Status](https://travis-ci.org/emicklei/go-restful)
+[Go Report Card](https://goreportcard.com/report/github.com/emicklei/go-restful)
+[GoDoc](https://godoc.org/github.com/emicklei/go-restful)
+
+- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
+
+REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
+
+- GET = Retrieve a representation of a resource
+- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
+- PUT = Create if you are sending the full content of the specified resource (URI).
+- PUT = Update if you are updating the full content of the specified resource.
+- DELETE = Delete if you are requesting the server to delete the resource
+- PATCH = Update partial content of a resource
+- OPTIONS = Get information about the communication options for the request URI
+
+### Example
+
+```Go
+ws := new(restful.WebService)
+ws.
+ Path("/users").
+ Consumes(restful.MIME_XML, restful.MIME_JSON).
+ Produces(restful.MIME_JSON, restful.MIME_XML)
+
+ws.Route(ws.GET("/{user-id}").To(u.findUser).
+ Doc("get a user").
+ Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
+ Writes(User{}))
+...
+
+func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
+ id := request.PathParameter("user-id")
+ ...
+}
+```
+
+[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)
+
+### Features
+
+- Routes for request → function mapping with path parameter (e.g. {id}) support
+- Configurable router:
+  - (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*})
+ - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
+- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
+- Response API for writing structs to JSON/XML and setting headers
+- Customizable encoding using EntityReaderWriter registration
+- Filters for intercepting the request → response flow on Service or Route level
+- Request-scoped variables using attributes
+- Containers for WebServices on different HTTP endpoints
+- Content encoding (gzip,deflate) of request and response payloads
+- Automatic responses on OPTIONS (using a filter)
+- Automatic CORS request handling (using a filter)
+- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12))
+- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
+- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
+- Configurable (trace) logging
+- Customizable gzip/deflate readers and writers using CompressorProvider registration
+
+## How to customize
+There are several hooks to customize the behavior of the go-restful package.
+
+- Router algorithm
+- Panic recovery
+- JSON decoder
+- Trace logging
+- Compression
+- Encoders for other serializers
+- Use [jsoniter](https://github.com/json-iterator/go) by building this package with a tag, e.g. `go build -tags=jsoniter .`
+
+TODO: write examples of these.
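+
+A minimal sketch, assuming a placeholder recover handler named `myRecover`, that wires up a few of these hooks:
+
+```Go
+package main
+
+import (
+	"net/http"
+
+	restful "github.com/emicklei/go-restful"
+)
+
+// myRecover is a placeholder panic handler; it only writes a generic 500.
+func myRecover(reason interface{}, w http.ResponseWriter) {
+	http.Error(w, "internal server error", http.StatusInternalServerError)
+}
+
+func main() {
+	// Swap the compressor provider for a bounded cache of gzip/zlib writers.
+	restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(20, 20))
+
+	c := restful.NewContainer()
+	c.DoNotRecover(false)       // let the container recover from panics
+	c.RecoverHandler(myRecover) // using the custom recover handler above
+	c.EnableContentEncoding(true)
+
+	http.ListenAndServe(":8080", c)
+}
+```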
+
+## Resources
+
+- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
+- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
+- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
+- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia)
+- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
+
+Type ```git shortlog -s``` for a full list of contributors.
+
+© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome.
diff --git a/vendor/github.com/emicklei/go-restful/Srcfile b/vendor/github.com/emicklei/go-restful/Srcfile
new file mode 100644
index 0000000..16fd186
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/Srcfile
@@ -0,0 +1 @@
+{"SkipDirs": ["examples"]}
diff --git a/vendor/github.com/emicklei/go-restful/bench_test.sh b/vendor/github.com/emicklei/go-restful/bench_test.sh
new file mode 100644
index 0000000..47ffbe4
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/bench_test.sh
@@ -0,0 +1,10 @@
+#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
+
+go test -c
+./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
+./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
+
+#go tool pprof go-restful.test tmp.prof
+go tool pprof go-restful.test curly.prof
+
+
diff --git a/vendor/github.com/emicklei/go-restful/compress.go b/vendor/github.com/emicklei/go-restful/compress.go
new file mode 100644
index 0000000..220b377
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/compress.go
@@ -0,0 +1,123 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bufio"
+ "compress/gzip"
+ "compress/zlib"
+ "errors"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+)
+
+// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
+var EnableContentEncoding = false
+
+// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
+type CompressingResponseWriter struct {
+ writer http.ResponseWriter
+ compressor io.WriteCloser
+ encoding string
+}
+
+// Header is part of http.ResponseWriter interface
+func (c *CompressingResponseWriter) Header() http.Header {
+ return c.writer.Header()
+}
+
+// WriteHeader is part of http.ResponseWriter interface
+func (c *CompressingResponseWriter) WriteHeader(status int) {
+ c.writer.WriteHeader(status)
+}
+
+// Write is part of http.ResponseWriter interface
+// It is passed through the compressor
+func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
+ if c.isCompressorClosed() {
+ return -1, errors.New("Compressing error: tried to write data using closed compressor")
+ }
+ return c.compressor.Write(bytes)
+}
+
+// CloseNotify is part of http.CloseNotifier interface
+func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
+ return c.writer.(http.CloseNotifier).CloseNotify()
+}
+
+// Close the underlying compressor
+func (c *CompressingResponseWriter) Close() error {
+ if c.isCompressorClosed() {
+ return errors.New("Compressing error: tried to close already closed compressor")
+ }
+
+ c.compressor.Close()
+ if ENCODING_GZIP == c.encoding {
+ currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
+ }
+ if ENCODING_DEFLATE == c.encoding {
+ currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
+ }
+ // gc hint needed?
+ c.compressor = nil
+ return nil
+}
+
+func (c *CompressingResponseWriter) isCompressorClosed() bool {
+ return nil == c.compressor
+}
+
+// Hijack implements the Hijacker interface
+// This is especially useful when combining Container.EnabledContentEncoding
+// in combination with websockets (for instance gorilla/websocket)
+func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hijacker, ok := c.writer.(http.Hijacker)
+ if !ok {
+ return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
+ }
+ return hijacker.Hijack()
+}
+
+// wantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
+func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
+ header := httpRequest.Header.Get(HEADER_AcceptEncoding)
+ gi := strings.Index(header, ENCODING_GZIP)
+ zi := strings.Index(header, ENCODING_DEFLATE)
+ // use in order of appearance
+ if gi == -1 {
+ return zi != -1, ENCODING_DEFLATE
+ } else if zi == -1 {
+ return gi != -1, ENCODING_GZIP
+ } else {
+ if gi < zi {
+ return true, ENCODING_GZIP
+ }
+ return true, ENCODING_DEFLATE
+ }
+}
+
+// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate}
+func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
+ httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
+ c := new(CompressingResponseWriter)
+ c.writer = httpWriter
+ var err error
+ if ENCODING_GZIP == encoding {
+ w := currentCompressorProvider.AcquireGzipWriter()
+ w.Reset(httpWriter)
+ c.compressor = w
+ c.encoding = ENCODING_GZIP
+ } else if ENCODING_DEFLATE == encoding {
+ w := currentCompressorProvider.AcquireZlibWriter()
+ w.Reset(httpWriter)
+ c.compressor = w
+ c.encoding = ENCODING_DEFLATE
+ } else {
+ return nil, errors.New("Unknown encoding:" + encoding)
+ }
+ return c, err
+}
diff --git a/vendor/github.com/emicklei/go-restful/compressor_cache.go b/vendor/github.com/emicklei/go-restful/compressor_cache.go
new file mode 100644
index 0000000..ee42601
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/compressor_cache.go
@@ -0,0 +1,103 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "compress/gzip"
+ "compress/zlib"
+)
+
+// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed number
+// of writers and readers (resources).
+// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
+type BoundedCachedCompressors struct {
+ gzipWriters chan *gzip.Writer
+ gzipReaders chan *gzip.Reader
+ zlibWriters chan *zlib.Writer
+ writersCapacity int
+ readersCapacity int
+}
+
+// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a pre-filled cache.
+func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
+ b := &BoundedCachedCompressors{
+ gzipWriters: make(chan *gzip.Writer, writersCapacity),
+ gzipReaders: make(chan *gzip.Reader, readersCapacity),
+ zlibWriters: make(chan *zlib.Writer, writersCapacity),
+ writersCapacity: writersCapacity,
+ readersCapacity: readersCapacity,
+ }
+ for ix := 0; ix < writersCapacity; ix++ {
+ b.gzipWriters <- newGzipWriter()
+ b.zlibWriters <- newZlibWriter()
+ }
+ for ix := 0; ix < readersCapacity; ix++ {
+ b.gzipReaders <- newGzipReader()
+ }
+ return b
+}
+
+// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
+ var writer *gzip.Writer
+ select {
+ case writer, _ = <-b.gzipWriters:
+ default:
+ // return a new unmanaged one
+ writer = newGzipWriter()
+ }
+ return writer
+}
+
+// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
+ // forget the unmanaged ones
+ if len(b.gzipWriters) < b.writersCapacity {
+ b.gzipWriters <- w
+ }
+}
+
+// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
+ var reader *gzip.Reader
+ select {
+ case reader, _ = <-b.gzipReaders:
+ default:
+ // return a new unmanaged one
+ reader = newGzipReader()
+ }
+ return reader
+}
+
+// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
+ // forget the unmanaged ones
+ if len(b.gzipReaders) < b.readersCapacity {
+ b.gzipReaders <- r
+ }
+}
+
+// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
+ var writer *zlib.Writer
+ select {
+ case writer, _ = <-b.zlibWriters:
+ default:
+ // return a new unmanaged one
+ writer = newZlibWriter()
+ }
+ return writer
+}
+
+// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
+ // forget the unmanaged ones
+ if len(b.zlibWriters) < b.writersCapacity {
+ b.zlibWriters <- w
+ }
+}
diff --git a/vendor/github.com/emicklei/go-restful/compressor_pools.go b/vendor/github.com/emicklei/go-restful/compressor_pools.go
new file mode 100644
index 0000000..d866ce6
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/compressor_pools.go
@@ -0,0 +1,91 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "compress/gzip"
+ "compress/zlib"
+ "sync"
+)
+
+// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
+type SyncPoolCompessors struct {
+ GzipWriterPool *sync.Pool
+ GzipReaderPool *sync.Pool
+ ZlibWriterPool *sync.Pool
+}
+
+// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
+func NewSyncPoolCompessors() *SyncPoolCompessors {
+ return &SyncPoolCompessors{
+ GzipWriterPool: &sync.Pool{
+ New: func() interface{} { return newGzipWriter() },
+ },
+ GzipReaderPool: &sync.Pool{
+ New: func() interface{} { return newGzipReader() },
+ },
+ ZlibWriterPool: &sync.Pool{
+ New: func() interface{} { return newZlibWriter() },
+ },
+ }
+}
+
+func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
+ return s.GzipWriterPool.Get().(*gzip.Writer)
+}
+
+func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
+ s.GzipWriterPool.Put(w)
+}
+
+func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
+ return s.GzipReaderPool.Get().(*gzip.Reader)
+}
+
+func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
+ s.GzipReaderPool.Put(r)
+}
+
+func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
+ return s.ZlibWriterPool.Get().(*zlib.Writer)
+}
+
+func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
+ s.ZlibWriterPool.Put(w)
+}
+
+func newGzipWriter() *gzip.Writer {
+ // create with an empty bytes writer; it will be replaced before using the gzipWriter
+ writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
+ if err != nil {
+ panic(err.Error())
+ }
+ return writer
+}
+
+func newGzipReader() *gzip.Reader {
+ // create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
+ // we can safely use currentCompressProvider because it is set on package initialization.
+ w := currentCompressorProvider.AcquireGzipWriter()
+ defer currentCompressorProvider.ReleaseGzipWriter(w)
+ b := new(bytes.Buffer)
+ w.Reset(b)
+ w.Flush()
+ w.Close()
+ reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
+ if err != nil {
+ panic(err.Error())
+ }
+ return reader
+}
+
+func newZlibWriter() *zlib.Writer {
+ writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
+ if err != nil {
+ panic(err.Error())
+ }
+ return writer
+}
diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/compressors.go
new file mode 100644
index 0000000..9db4a8c
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/compressors.go
@@ -0,0 +1,54 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "compress/gzip"
+ "compress/zlib"
+)
+
+// CompressorProvider describes a component that can provide compressors for the std methods.
+type CompressorProvider interface {
+ // Returns a *gzip.Writer which needs to be released later.
+ // Before using it, call Reset().
+ AcquireGzipWriter() *gzip.Writer
+
+ // Releases an acquired *gzip.Writer.
+ ReleaseGzipWriter(w *gzip.Writer)
+
+ // Returns a *gzip.Reader which needs to be released later.
+ AcquireGzipReader() *gzip.Reader
+
+ // Releases an acquired *gzip.Reader.
+ ReleaseGzipReader(w *gzip.Reader)
+
+ // Returns a *zlib.Writer which needs to be released later.
+ // Before using it, call Reset().
+ AcquireZlibWriter() *zlib.Writer
+
+ // Releases an acquired *zlib.Writer.
+ ReleaseZlibWriter(w *zlib.Writer)
+}
+
+// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
+var currentCompressorProvider CompressorProvider
+
+func init() {
+ currentCompressorProvider = NewSyncPoolCompessors()
+}
+
+// CurrentCompressorProvider returns the current CompressorProvider.
+// It is initialized using a SyncPoolCompessors.
+func CurrentCompressorProvider() CompressorProvider {
+ return currentCompressorProvider
+}
+
+// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
+func SetCompressorProvider(p CompressorProvider) {
+ if p == nil {
+ panic("cannot set compressor provider to nil")
+ }
+ currentCompressorProvider = p
+}
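+
+// Illustrative sketch (assuming an io.Writer named dst and a []byte named payload):
+// the acquire / Reset / release cycle described on CompressorProvider.
+//
+//	zw := CurrentCompressorProvider().AcquireGzipWriter()
+//	zw.Reset(dst) // must be called before first use
+//	zw.Write(payload)
+//	zw.Close()
+//	CurrentCompressorProvider().ReleaseGzipWriter(zw)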
diff --git a/vendor/github.com/emicklei/go-restful/constants.go b/vendor/github.com/emicklei/go-restful/constants.go
new file mode 100644
index 0000000..203439c
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/constants.go
@@ -0,0 +1,30 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+const (
+ MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
+ MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
+ MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
+
+ HEADER_Allow = "Allow"
+ HEADER_Accept = "Accept"
+ HEADER_Origin = "Origin"
+ HEADER_ContentType = "Content-Type"
+ HEADER_LastModified = "Last-Modified"
+ HEADER_AcceptEncoding = "Accept-Encoding"
+ HEADER_ContentEncoding = "Content-Encoding"
+ HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers"
+ HEADER_AccessControlRequestMethod = "Access-Control-Request-Method"
+ HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers"
+ HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods"
+ HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin"
+ HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
+ HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers"
+ HEADER_AccessControlMaxAge = "Access-Control-Max-Age"
+
+ ENCODING_GZIP = "gzip"
+ ENCODING_DEFLATE = "deflate"
+)
diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go
new file mode 100644
index 0000000..2638cb2
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/container.go
@@ -0,0 +1,374 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
+// The requests are further dispatched to routes of WebServices using a RouteSelector
+type Container struct {
+ webServicesLock sync.RWMutex
+ webServices []*WebService
+ ServeMux *http.ServeMux
+ isRegisteredOnRoot bool
+ containerFilters []FilterFunction
+ doNotRecover bool // default is true
+ recoverHandleFunc RecoverHandleFunction
+ serviceErrorHandleFunc ServiceErrorHandleFunction
+ router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative)
+ contentEncodingEnabled bool // default is false
+}
+
+// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
+func NewContainer() *Container {
+ return &Container{
+ webServices: []*WebService{},
+ ServeMux: http.NewServeMux(),
+ isRegisteredOnRoot: false,
+ containerFilters: []FilterFunction{},
+ doNotRecover: true,
+ recoverHandleFunc: logStackOnRecover,
+ serviceErrorHandleFunc: writeServiceError,
+ router: CurlyRouter{},
+ contentEncodingEnabled: false}
+}
+
+// RecoverHandleFunction declares functions that can be used to handle a panic situation.
+// The first argument is what recover() returns. The second must be used to communicate an error response.
+type RecoverHandleFunction func(interface{}, http.ResponseWriter)
+
+// RecoverHandler changes the default function (logStackOnRecover) to be called
+// when a panic is detected. DoNotRecover must be set to false for this handler to be called.
+func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
+ c.recoverHandleFunc = handler
+}
+
+// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
+// The first argument is the service error, the second is the request that resulted in the error and
+// the third must be used to communicate an error response.
+type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
+
+// ServiceErrorHandler changes the default function (writeServiceError) to be called
+// when a ServiceError is detected.
+func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
+ c.serviceErrorHandleFunc = handler
+}
+
+// DoNotRecover controls whether panics will be caught to return HTTP 500.
+// If set to true, Route functions are responsible for handling any error situation.
+// Default value is true.
+func (c *Container) DoNotRecover(doNot bool) {
+ c.doNotRecover = doNot
+}
+
+// Router changes the default Router (currently CurlyRouter)
+func (c *Container) Router(aRouter RouteSelector) {
+ c.router = aRouter
+}
+
+// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
+func (c *Container) EnableContentEncoding(enabled bool) {
+ c.contentEncodingEnabled = enabled
+}
+
+// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
+func (c *Container) Add(service *WebService) *Container {
+ c.webServicesLock.Lock()
+ defer c.webServicesLock.Unlock()
+
+ // if rootPath was not set then lazy initialize it
+ if len(service.rootPath) == 0 {
+ service.Path("/")
+ }
+
+ // cannot have duplicate root paths
+ for _, each := range c.webServices {
+ if each.RootPath() == service.RootPath() {
+ log.Printf("WebService with duplicate root path detected:['%v']", each)
+ os.Exit(1)
+ }
+ }
+
+ // If not registered on root then add specific mapping
+ if !c.isRegisteredOnRoot {
+ c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
+ }
+ c.webServices = append(c.webServices, service)
+ return c
+}
+
+// addHandler may set a new HandleFunc for the serveMux
+// this function must run inside the critical region protected by the webServicesLock.
+// returns true if the function was registered on root ("/")
+func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
+ pattern := fixedPrefixPath(service.RootPath())
+ // check if root path registration is needed
+ if "/" == pattern || "" == pattern {
+ serveMux.HandleFunc("/", c.dispatch)
+ return true
+ }
+ // detect if registration already exists
+ alreadyMapped := false
+ for _, each := range c.webServices {
+ if each.RootPath() == service.RootPath() {
+ alreadyMapped = true
+ break
+ }
+ }
+ if !alreadyMapped {
+ serveMux.HandleFunc(pattern, c.dispatch)
+ if !strings.HasSuffix(pattern, "/") {
+ serveMux.HandleFunc(pattern+"/", c.dispatch)
+ }
+ }
+ return false
+}
+
+// Remove a WebService from the Container.
+func (c *Container) Remove(ws *WebService) error {
+ if c.ServeMux == http.DefaultServeMux {
+ errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
+ log.Print(errMsg)
+ return errors.New(errMsg)
+ }
+ c.webServicesLock.Lock()
+ defer c.webServicesLock.Unlock()
+ // build a new ServeMux and re-register all WebServices
+ newServeMux := http.NewServeMux()
+ newServices := []*WebService{}
+ newIsRegisteredOnRoot := false
+ for _, each := range c.webServices {
+ if each.rootPath != ws.rootPath {
+ // If not registered on root then add specific mapping
+ if !newIsRegisteredOnRoot {
+ newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
+ }
+ newServices = append(newServices, each)
+ }
+ }
+ c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
+ return nil
+}
+
+// logStackOnRecover is the default RecoverHandleFunction and is called
+// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
+// Default implementation logs the stacktrace and writes the stacktrace on the response.
+// This may be a security issue as it exposes sourcecode information.
+func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
+ var buffer bytes.Buffer
+ buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason))
+ for i := 2; ; i += 1 {
+ _, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
+ }
+ log.Print(buffer.String())
+ httpWriter.WriteHeader(http.StatusInternalServerError)
+ httpWriter.Write(buffer.Bytes())
+}
+
+// writeServiceError is the default ServiceErrorHandleFunction and is called
+// when a ServiceError is returned during route selection. Default implementation
+// calls resp.WriteErrorString(err.Code, err.Message)
+func writeServiceError(err ServiceError, req *Request, resp *Response) {
+ resp.WriteErrorString(err.Code, err.Message)
+}
+
+// Dispatch the incoming Http Request to a matching WebService.
+func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+ if httpWriter == nil {
+ panic("httpWriter cannot be nil")
+ }
+ if httpRequest == nil {
+ panic("httpRequest cannot be nil")
+ }
+ c.dispatch(httpWriter, httpRequest)
+}
+
+// Dispatch the incoming Http Request to a matching WebService.
+func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+ writer := httpWriter
+
+ // CompressingResponseWriter should be closed after all operations are done
+ defer func() {
+ if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
+ compressWriter.Close()
+ }
+ }()
+
+ // Install panic recovery unless told otherwise
+ if !c.doNotRecover { // catch all for 500 response
+ defer func() {
+ if r := recover(); r != nil {
+ c.recoverHandleFunc(r, writer)
+ return
+ }
+ }()
+ }
+
+ // Find the best matching Route; err is non-nil if no match was found
+ var webService *WebService
+ var route *Route
+ var err error
+ func() {
+ c.webServicesLock.RLock()
+ defer c.webServicesLock.RUnlock()
+ webService, route, err = c.router.SelectRoute(
+ c.webServices,
+ httpRequest)
+ }()
+
+ // Detect if compression is needed
+ // assume without compression, test for override
+ contentEncodingEnabled := c.contentEncodingEnabled
+ if route != nil && route.contentEncodingEnabled != nil {
+ contentEncodingEnabled = *route.contentEncodingEnabled
+ }
+ if contentEncodingEnabled {
+ doCompress, encoding := wantsCompressedResponse(httpRequest)
+ if doCompress {
+ var err error
+ writer, err = NewCompressingResponseWriter(httpWriter, encoding)
+ if err != nil {
+ log.Print("unable to install compressor: ", err)
+ httpWriter.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+
+ if err != nil {
+ // a non-200 response has already been written
+ // run container filters anyway ; they should not touch the response...
+ chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
+ switch err.(type) {
+ case ServiceError:
+ ser := err.(ServiceError)
+ c.serviceErrorHandleFunc(ser, req, resp)
+ }
+ // TODO
+ }}
+ chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
+ return
+ }
+ pathProcessor, routerProcessesPath := c.router.(PathProcessor)
+ if !routerProcessesPath {
+ pathProcessor = defaultPathProcessor{}
+ }
+ pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path)
+ wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams)
+ // pass through filters (if any)
+ if size := len(c.containerFilters) + len(webService.filters) + len(route.Filters); size > 0 {
+ // compose filter chain
+ allFilters := make([]FilterFunction, 0, size)
+ allFilters = append(allFilters, c.containerFilters...)
+ allFilters = append(allFilters, webService.filters...)
+ allFilters = append(allFilters, route.Filters...)
+ chain := FilterChain{Filters: allFilters, Target: route.Function}
+ chain.ProcessFilter(wrappedRequest, wrappedResponse)
+ } else {
+ // no filters, handle request by route
+ route.Function(wrappedRequest, wrappedResponse)
+ }
+}
+
+// fixedPrefixPath returns the fixed part of the pathspec; it may include template vars {}
+func fixedPrefixPath(pathspec string) string {
+ varBegin := strings.Index(pathspec, "{")
+ if -1 == varBegin {
+ return pathspec
+ }
+ return pathspec[:varBegin]
+}
+
+// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
+func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
+ c.ServeMux.ServeHTTP(httpwriter, httpRequest)
+}
+
+// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
+func (c *Container) Handle(pattern string, handler http.Handler) {
+ c.ServeMux.Handle(pattern, handler)
+}
+
+// HandleWithFilter registers the handler for the given pattern.
+// Container's filter chain is applied for handler.
+// If a handler already exists for pattern, HandleWithFilter panics.
+func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
+ f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
+ if len(c.containerFilters) == 0 {
+ handler.ServeHTTP(httpResponse, httpRequest)
+ return
+ }
+
+ chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
+ handler.ServeHTTP(httpResponse, httpRequest)
+ }}
+ chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
+ }
+
+ c.Handle(pattern, http.HandlerFunc(f))
+}
+
+// Filter appends a container FilterFunction. These are called before dispatching
+// a http.Request to a WebService from the container
+func (c *Container) Filter(filter FilterFunction) {
+ c.containerFilters = append(c.containerFilters, filter)
+}
+
+// RegisteredWebServices returns the collections of added WebServices
+func (c *Container) RegisteredWebServices() []*WebService {
+ c.webServicesLock.RLock()
+ defer c.webServicesLock.RUnlock()
+ result := make([]*WebService, len(c.webServices))
+ for ix := range c.webServices {
+ result[ix] = c.webServices[ix]
+ }
+ return result
+}
+
+// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
+func (c *Container) computeAllowedMethods(req *Request) []string {
+ // Go through all RegisteredWebServices() and all its Routes to collect the options
+ methods := []string{}
+ requestPath := req.Request.URL.Path
+ for _, ws := range c.RegisteredWebServices() {
+ matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
+ if matches != nil {
+ finalMatch := matches[len(matches)-1]
+ for _, rt := range ws.Routes() {
+ matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
+ if matches != nil {
+ lastMatch := matches[len(matches)-1]
+ if lastMatch == "" || lastMatch == "/" { // include only when the route remainder is empty or "/"
+ methods = append(methods, rt.Method)
+ }
+ }
+ }
+ }
+ }
+ // methods = append(methods, "OPTIONS") not sure about this
+ return methods
+}
+
+// newBasicRequestResponse creates a pair of Request,Response from its http versions.
+// It is basic because no parameter or (produces) content-type information is given.
+func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
+ resp := NewResponse(httpWriter)
+ resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
+ return NewRequest(httpRequest), resp
+}
diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/cors_filter.go
new file mode 100644
index 0000000..1efeef0
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/cors_filter.go
@@ -0,0 +1,202 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
+// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
+// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
+//
+// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
+// http://enable-cors.org/server.html
+// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
+type CrossOriginResourceSharing struct {
+ ExposeHeaders []string // list of Header names
+ AllowedHeaders []string // list of Header names
+ AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
+ AllowedMethods []string
+ MaxAge int // number of seconds before requiring new Options request
+ CookiesAllowed bool
+ Container *Container
+
+ allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
+}
+
+// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
+// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
+func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
+ origin := req.Request.Header.Get(HEADER_Origin)
+ if len(origin) == 0 {
+ if trace {
+ traceLogger.Print("no Http header Origin set")
+ }
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ if !c.isOriginAllowed(origin) { // check whether this origin is allowed
+ if trace {
+ traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
+ }
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ if req.Request.Method != "OPTIONS" {
+ c.doActualRequest(req, resp)
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
+ c.doPreflightRequest(req, resp)
+ } else {
+ c.doActualRequest(req, resp)
+ chain.ProcessFilter(req, resp)
+ return
+ }
+}
+
+func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
+ c.setOptionsHeaders(req, resp)
+ // continue processing the response
+}
+
+func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
+ if len(c.AllowedMethods) == 0 {
+ if c.Container == nil {
+ c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
+ } else {
+ c.AllowedMethods = c.Container.computeAllowedMethods(req)
+ }
+ }
+
+ acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
+ if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
+ if trace {
+ traceLogger.Printf("Http header %s:%s is not in %v",
+ HEADER_AccessControlRequestMethod,
+ acrm,
+ c.AllowedMethods)
+ }
+ return
+ }
+ acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
+ if len(acrhs) > 0 {
+ for _, each := range strings.Split(acrhs, ",") {
+ if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
+ if trace {
+ traceLogger.Printf("Http header %s:%s is not in %v",
+ HEADER_AccessControlRequestHeaders,
+ acrhs,
+ c.AllowedHeaders)
+ }
+ return
+ }
+ }
+ }
+ resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
+ resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
+ c.setOptionsHeaders(req, resp)
+
+ // return http 200 response, no body
+}
+
+func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
+ c.checkAndSetExposeHeaders(resp)
+ c.setAllowOriginHeader(req, resp)
+ c.checkAndSetAllowCredentials(resp)
+ if c.MaxAge > 0 {
+ resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
+ }
+}
+
+func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
+ if len(origin) == 0 {
+ return false
+ }
+ if len(c.AllowedDomains) == 0 {
+ return true
+ }
+
+ allowed := false
+ for _, domain := range c.AllowedDomains {
+ if domain == origin {
+ allowed = true
+ break
+ }
+ }
+
+ if !allowed {
+ if len(c.allowedOriginPatterns) == 0 {
+ // compile allowed domains to allowed origin patterns
+ allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
+ if err != nil {
+ return false
+ }
+ c.allowedOriginPatterns = allowedOriginRegexps
+ }
+
+ for _, pattern := range c.allowedOriginPatterns {
+ if allowed = pattern.MatchString(origin); allowed {
+ break
+ }
+ }
+ }
+
+ return allowed
+}
+
+func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
+ origin := req.Request.Header.Get(HEADER_Origin)
+ if c.isOriginAllowed(origin) {
+ resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
+ }
+}
+
+func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
+ if len(c.ExposeHeaders) > 0 {
+ resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
+ }
+}
+
+func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
+ if c.CookiesAllowed {
+ resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
+ }
+}
+
+func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
+ for _, each := range allowedMethods {
+ if each == method {
+ return true
+ }
+ }
+ return false
+}
+
+func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
+ for _, each := range c.AllowedHeaders {
+ if strings.ToLower(each) == strings.ToLower(header) {
+ return true
+ }
+ }
+ return false
+}
+
+// Take a list of strings and compile them into a list of regular expressions.
+func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
+ regexps := []*regexp.Regexp{}
+ for _, regexpStr := range regexpStrings {
+ r, err := regexp.Compile(regexpStr)
+ if err != nil {
+ return regexps, err
+ }
+ regexps = append(regexps, r)
+ }
+ return regexps, nil
+}
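
A minimal sketch of installing this CORS filter on a container; the allowed-domain pattern, headers and methods below are illustrative assumptions:

    package main

    import (
        "net/http"

        restful "github.com/emicklei/go-restful"
    )

    func main() {
        container := restful.NewContainer()

        cors := restful.CrossOriginResourceSharing{
            AllowedHeaders: []string{"Content-Type", "Accept"},
            AllowedDomains: []string{`https://.*\.example\.com`}, // exact values or regular expressions
            AllowedMethods: []string{"GET", "POST"},
            CookiesAllowed: false,
            Container:      container,
        }
        container.Filter(cors.Filter)

        // also answer plain OPTIONS requests that are not CORS preflights
        container.Filter(container.OPTIONSFilter)

        http.ListenAndServe(":8080", container)
    }
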
diff --git a/vendor/github.com/emicklei/go-restful/coverage.sh b/vendor/github.com/emicklei/go-restful/coverage.sh
new file mode 100644
index 0000000..e27dbf1
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/coverage.sh
@@ -0,0 +1,2 @@
+go test -coverprofile=coverage.out
+go tool cover -html=coverage.out
\ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go
new file mode 100644
index 0000000..14d5b76
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/curly.go
@@ -0,0 +1,164 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "net/http"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
+type CurlyRouter struct{}
+
+// SelectRoute is part of the Router interface and returns the best match
+// for the WebService and its Route for the given Request.
+func (c CurlyRouter) SelectRoute(
+ webServices []*WebService,
+ httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
+
+ requestTokens := tokenizePath(httpRequest.URL.Path)
+
+ detectedService := c.detectWebService(requestTokens, webServices)
+ if detectedService == nil {
+ if trace {
+ traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
+ }
+ return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+ }
+ candidateRoutes := c.selectRoutes(detectedService, requestTokens)
+ if len(candidateRoutes) == 0 {
+ if trace {
+ traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
+ }
+ return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+ }
+ selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
+ if selectedRoute == nil {
+ return detectedService, nil, err
+ }
+ return detectedService, selectedRoute, nil
+}
+
+// selectRoutes returns the Routes from a WebService that match the path tokens from the request.
+func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
+ candidates := make(sortableCurlyRoutes, 0, 8)
+ for _, each := range ws.routes {
+ matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
+ if matches {
+ candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
+ }
+ }
+ sort.Sort(candidates)
+ return candidates
+}
+
+// matchesRouteByPathTokens computes whether the route matches, how many parameters match and how many static path elements there are.
+func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
+ if len(routeTokens) < len(requestTokens) {
+ // proceed in matching only if last routeToken is wildcard
+ count := len(routeTokens)
+ if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
+ return false, 0, 0
+ }
+ // proceed
+ }
+ for i, routeToken := range routeTokens {
+ if i == len(requestTokens) {
+ // reached end of request path
+ return false, 0, 0
+ }
+ requestToken := requestTokens[i]
+ if strings.HasPrefix(routeToken, "{") {
+ paramCount++
+ if colon := strings.Index(routeToken, ":"); colon != -1 {
+ // match by regex
+ matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
+ if !matchesToken {
+ return false, 0, 0
+ }
+ if matchesRemainder {
+ break
+ }
+ }
+ } else { // no { prefix
+ if requestToken != routeToken {
+ return false, 0, 0
+ }
+ staticCount++
+ }
+ }
+ return true, paramCount, staticCount
+}
+
+// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
+// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
+func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
+ regPart := routeToken[colon+1 : len(routeToken)-1]
+ if regPart == "*" {
+ if trace {
+ traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
+ }
+ return true, true
+ }
+ matched, err := regexp.MatchString(regPart, requestToken)
+ return (matched && err == nil), false
+}
+
+var jsr311Router = RouterJSR311{}
+
+// detectRoute selects from a list of Routes the first match by inspecting both the Accept and Content-Type
+// headers of the Request. See also RouterJSR311 in jsr311.go
+func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
+ // tracing is done inside detectRoute
+ return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest)
+}
+
+// detectWebService returns the best matching webService given the list of path tokens.
+// see also computeWebserviceScore
+func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
+ var best *WebService
+ score := -1
+ for _, each := range webServices {
+ matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
+ if matches && (eachScore > score) {
+ best = each
+ score = eachScore
+ }
+ }
+ return best
+}
+
+// computeWebserviceScore returns whether tokens match and
+// the weighted score of the longest matching consecutive tokens from the beginning.
+func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
+ if len(tokens) > len(requestTokens) {
+ return false, 0
+ }
+ score := 0
+ for i := 0; i < len(tokens); i++ {
+ each := requestTokens[i]
+ other := tokens[i]
+ if len(each) == 0 && len(other) == 0 {
+ score++
+ continue
+ }
+ if len(other) > 0 && strings.HasPrefix(other, "{") {
+ // no empty match
+ if len(each) == 0 {
+ return false, score
+ }
+ score += 1
+ } else {
+ // not a parameter
+ if each != other {
+ return false, score
+ }
+ score += (len(tokens) - i) * 10 //fuzzy
+ }
+ }
+ return true, score
+}
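
The CurlyRouter above resolves curly-bracket path templates. A small, hypothetical route set exercising the regex and wildcard forms (the echoPath handler and the /files paths are illustrative assumptions):

    package main

    import (
        "net/http"

        restful "github.com/emicklei/go-restful"
    )

    // echoPath is an illustrative handler that writes back the request path.
    func echoPath(req *restful.Request, resp *restful.Response) {
        resp.Write([]byte(req.Request.URL.Path))
    }

    func main() {
        ws := new(restful.WebService)
        ws.Path("/files")
        // parameter restricted by a regular expression, e.g. /files/zips/1234
        ws.Route(ws.GET("/zips/{zip:[0-9]+}").To(echoPath))
        // tail-matching wildcard, e.g. /files/static/css/site.css
        ws.Route(ws.GET("/static/{path:*}").To(echoPath))

        restful.Add(ws) // registers on the DefaultContainer, which uses the CurlyRouter
        http.ListenAndServe(":8080", nil)
    }
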
diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/curly_route.go
new file mode 100644
index 0000000..403dd3b
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/curly_route.go
@@ -0,0 +1,54 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
+type curlyRoute struct {
+ route Route
+ paramCount int
+ staticCount int
+}
+
+// sortableCurlyRoutes orders by most parameters and path elements first.
+type sortableCurlyRoutes []curlyRoute
+
+func (s *sortableCurlyRoutes) add(route curlyRoute) {
+ *s = append(*s, route)
+}
+
+func (s sortableCurlyRoutes) routes() (routes []Route) {
+ routes = make([]Route, 0, len(s))
+ for _, each := range s {
+ routes = append(routes, each.route) // TODO change return type
+ }
+ return routes
+}
+
+func (s sortableCurlyRoutes) Len() int {
+ return len(s)
+}
+func (s sortableCurlyRoutes) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s sortableCurlyRoutes) Less(i, j int) bool {
+ a := s[j]
+ b := s[i]
+
+ // primary key
+ if a.staticCount < b.staticCount {
+ return true
+ }
+ if a.staticCount > b.staticCount {
+ return false
+ }
+ // secondary key
+ if a.paramCount < b.paramCount {
+ return true
+ }
+ if a.paramCount > b.paramCount {
+ return false
+ }
+ return a.route.Path < b.route.Path
+}
diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/doc.go
new file mode 100644
index 0000000..f7c16b0
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/doc.go
@@ -0,0 +1,185 @@
+/*
+Package restful, a lean package for creating REST-style WebServices without magic.
+
+WebServices and Routes
+
+A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
+Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
+WebServices must be added to a container (see below) in order to handle Http requests from a server.
+
+A Route is defined by an HTTP method, a URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
+This package has the logic to find the best matching Route and if found, call its Function.
+
+ ws := new(restful.WebService)
+ ws.
+ Path("/users").
+ Consumes(restful.MIME_JSON, restful.MIME_XML).
+ Produces(restful.MIME_JSON, restful.MIME_XML)
+
+ ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
+
+ ...
+
+ // GET http://localhost:8080/users/1
+ func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
+ id := request.PathParameter("user-id")
+ ...
+ }
+
+The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
+
+Regular expression matching Routes
+
+A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
+For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
+Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
+This feature requires the use of a CurlyRouter.
+
+Containers
+
+A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
+Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
+The Default container of go-restful uses the http.DefaultServeMux.
+You can create your own Container and create a new http.Server for that particular container.
+
+ container := restful.NewContainer()
+ server := &http.Server{Addr: ":8081", Handler: container}
+
+Filters
+
+A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
+You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
+In the restful package there are three hooks into the request,response flow where filters can be added.
+Each filter must define a FilterFunction:
+
+ func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
+
+Use the following statement to pass the request,response pair to the next filter or RouteFunction
+
+ chain.ProcessFilter(req, resp)
+
+Container Filters
+
+These are processed before any registered WebService.
+
+ // install a (global) filter for the default container (processed before any webservice)
+ restful.Filter(globalLogging)
+
+WebService Filters
+
+These are processed before any Route of a WebService.
+
+ // install a webservice filter (processed before any route)
+ ws.Filter(webserviceLogging).Filter(measureTime)
+
+
+Route Filters
+
+These are processed before calling the function associated with the Route.
+
+ // install 2 chained route filters (processed before calling findUser)
+ ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
+
+Response Encoding
+
+Two encodings are supported: gzip and deflate. To enable this for all responses:
+
+ restful.DefaultContainer.EnableContentEncoding(true)
+
+If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
+Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
+
+OPTIONS support
+
+By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
+
+ Filter(OPTIONSFilter())
+
+CORS
+
+By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
+
+ cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
+ Filter(cors.Filter)
+
+Error Handling
+
+Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
+For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
+
+ 400: Bad Request
+
+If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
+
+ 404: Not Found
+
+Despite a valid URI, the resource requested may not be available
+
+ 500: Internal Server Error
+
+If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
+
+ 405: Method Not Allowed
+
+The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
+
+ 406: Not Acceptable
+
+The request does not have or has an unknown Accept Header set for this operation.
+
+ 415: Unsupported Media Type
+
+The request does not have or has an unknown Content-Type Header set for this operation.
+
+ServiceError
+
+In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
+
+Performance options
+
+This package has several options that affect the performance of your service. It is important to understand them and how you can change them.
+
+ restful.DefaultContainer.DoNotRecover(false)
+
+DoNotRecover controls whether panics will be caught to return HTTP 500.
+If set to false, the container will recover from panics.
+Default value is true.
+
+ restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
+
+If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
+Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation.
+
+Troubleshooting
+
+This package has the means to produce detailed logging of the complete Http request matching process and filter invocation.
+Enabling this feature requires you to set a restful.StdLogger implementation (e.g. a log.Logger) such as:
+
+ restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
+
+Logging
+
+The restful.SetLogger() method allows you to override the logger used by the package. By default restful
+uses the standard library `log` package and logs to stderr. Different logging packages are supported as
+long as they conform to the `StdLogger` interface defined in the `log` sub-package; writing an adapter for your
+preferred package is simple.
+
+Resources
+
+[project]: https://github.com/emicklei/go-restful
+
+[examples]: https://github.com/emicklei/go-restful/blob/master/examples
+
+[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
+
+[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
+
+(c) 2012-2015, http://ernestmicklei.com. MIT License
+*/
+package restful
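
The Error Handling section above describes which status code fits which failure. A minimal, hypothetical handler that pairs a 404 status with a ServiceError body; the User type and lookupUser function are assumed application code, not part of this package:

    package main

    import (
        "errors"
        "net/http"

        restful "github.com/emicklei/go-restful"
    )

    // User is an illustrative entity type.
    type User struct {
        ID string `json:"id"`
    }

    // lookupUser is an assumed application lookup; here it always fails.
    func lookupUser(id string) (*User, error) {
        return nil, errors.New("no such user")
    }

    // findUser writes either the entity or a 404 together with a ServiceError body.
    func findUser(req *restful.Request, resp *restful.Response) {
        id := req.PathParameter("user-id")
        user, err := lookupUser(id)
        if err != nil {
            resp.WriteServiceError(http.StatusNotFound,
                restful.NewError(http.StatusNotFound, "user "+id+" not found"))
            return
        }
        resp.WriteEntity(user)
    }

    func main() {
        ws := new(restful.WebService)
        ws.Path("/users").Produces(restful.MIME_JSON)
        ws.Route(ws.GET("/{user-id}").To(findUser))
        restful.Add(ws)
        http.ListenAndServe(":8080", nil)
    }
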
diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go
new file mode 100644
index 0000000..66dfc82
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/entity_accessors.go
@@ -0,0 +1,162 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "encoding/xml"
+ "strings"
+ "sync"
+)
+
+// EntityReaderWriter can read and write values using an encoding such as JSON or XML.
+type EntityReaderWriter interface {
+ // Read a serialized version of the value from the request.
+ // The Request may have a decompressing reader. Depends on Content-Encoding.
+ Read(req *Request, v interface{}) error
+
+ // Write a serialized version of the value on the response.
+ // The Response may have a compressing writer. Depends on Accept-Encoding.
+ // status should be a valid Http Status code
+ Write(resp *Response, status int, v interface{}) error
+}
+
+// entityAccessRegistry is a singleton
+var entityAccessRegistry = &entityReaderWriters{
+ protection: new(sync.RWMutex),
+ accessors: map[string]EntityReaderWriter{},
+}
+
+// entityReaderWriters associates MIME to an EntityReaderWriter
+type entityReaderWriters struct {
+ protection *sync.RWMutex
+ accessors map[string]EntityReaderWriter
+}
+
+func init() {
+ RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
+ RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
+}
+
+// RegisterEntityAccessor adds or overrides the ReaderWriter for encoding content with this MIME type.
+func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
+ entityAccessRegistry.protection.Lock()
+ defer entityAccessRegistry.protection.Unlock()
+ entityAccessRegistry.accessors[mime] = erw
+}
+
+// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
+// This package is already initialized with such an accessor using the MIME_JSON contentType.
+func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
+ return entityJSONAccess{ContentType: contentType}
+}
+
+// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
+// This package is already initialized with such an accessor using the MIME_XML contentType.
+func NewEntityAccessorXML(contentType string) EntityReaderWriter {
+ return entityXMLAccess{ContentType: contentType}
+}
+
+// accessorAt returns the registered ReaderWriter for this MIME type.
+func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
+ r.protection.RLock()
+ defer r.protection.RUnlock()
+ er, ok := r.accessors[mime]
+ if !ok {
+ // retry with reverse lookup
+ // more expensive but we are in an exceptional situation anyway
+ for k, v := range r.accessors {
+ if strings.Contains(mime, k) {
+ return v, true
+ }
+ }
+ }
+ return er, ok
+}
+
+// entityXMLAccess is a EntityReaderWriter for XML encoding
+type entityXMLAccess struct {
+ // This is used for setting the Content-Type header when writing
+ ContentType string
+}
+
+// Read unmarshals the value from XML
+func (e entityXMLAccess) Read(req *Request, v interface{}) error {
+ return xml.NewDecoder(req.Request.Body).Decode(v)
+}
+
+// Write marshals the value to XML and sets the Content-Type Header.
+func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
+ return writeXML(resp, status, e.ContentType, v)
+}
+
+// writeXML marshals the value to XML and sets the Content-Type Header.
+func writeXML(resp *Response, status int, contentType string, v interface{}) error {
+ if v == nil {
+ resp.WriteHeader(status)
+ // do not write a nil representation
+ return nil
+ }
+ if resp.prettyPrint {
+ // pretty output must be created and written explicitly
+ output, err := xml.MarshalIndent(v, " ", " ")
+ if err != nil {
+ return err
+ }
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ _, err = resp.Write([]byte(xml.Header))
+ if err != nil {
+ return err
+ }
+ _, err = resp.Write(output)
+ return err
+ }
+ // not-so-pretty
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ return xml.NewEncoder(resp).Encode(v)
+}
+
+// entityJSONAccess is a EntityReaderWriter for JSON encoding
+type entityJSONAccess struct {
+ // This is used for setting the Content-Type header when writing
+ ContentType string
+}
+
+// Read unmarshals the value from JSON
+func (e entityJSONAccess) Read(req *Request, v interface{}) error {
+ decoder := NewDecoder(req.Request.Body)
+ decoder.UseNumber()
+ return decoder.Decode(v)
+}
+
+// Write marshals the value to JSON and sets the Content-Type Header.
+func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
+ return writeJSON(resp, status, e.ContentType, v)
+}
+
+// writeJSON marshals the value to JSON and sets the Content-Type Header.
+func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
+ if v == nil {
+ resp.WriteHeader(status)
+ // do not write a nil representation
+ return nil
+ }
+ if resp.prettyPrint {
+ // pretty output must be created and written explicitly
+ output, err := MarshalIndent(v, "", " ")
+ if err != nil {
+ return err
+ }
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ _, err = resp.Write(output)
+ return err
+ }
+ // not-so-pretty
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ return NewEncoder(resp).Encode(v)
+}
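
A minimal sketch of plugging an additional encoding into the registry above via RegisterEntityAccessor; the text/plain accessor below is an illustrative assumption, not something this package ships:

    package main

    import (
        "fmt"
        "io"

        restful "github.com/emicklei/go-restful"
    )

    // plainTextAccess is a hypothetical EntityReaderWriter for text/plain bodies.
    type plainTextAccess struct{}

    // Read copies the request body into a *string target.
    func (plainTextAccess) Read(req *restful.Request, v interface{}) error {
        s, ok := v.(*string)
        if !ok {
            return fmt.Errorf("plainTextAccess can only read into *string, got %T", v)
        }
        b, err := io.ReadAll(req.Request.Body)
        if err != nil {
            return err
        }
        *s = string(b)
        return nil
    }

    // Write sets the Content-Type header and prints the value as text.
    func (plainTextAccess) Write(resp *restful.Response, status int, v interface{}) error {
        resp.Header().Set("Content-Type", "text/plain")
        resp.WriteHeader(status)
        _, err := fmt.Fprint(resp, v)
        return err
    }

    func main() {
        restful.RegisterEntityAccessor("text/plain", plainTextAccess{})
    }
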
diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/filter.go
new file mode 100644
index 0000000..c23bfb5
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/filter.go
@@ -0,0 +1,35 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
+type FilterChain struct {
+ Filters []FilterFunction // ordered list of FilterFunction
+ Index int // index into filters that is currently in progress
+ Target RouteFunction // function to call after passing all filters
+}
+
+// ProcessFilter passes the request,response pair to the next Filter, or to the Target RouteFunction once all Filters have run.
+// Each filter can decide to proceed to the next Filter or handle the Response itself.
+func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
+ if f.Index < len(f.Filters) {
+ f.Index++
+ f.Filters[f.Index-1](request, response, f)
+ } else {
+ f.Target(request, response)
+ }
+}
+
+// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
+type FilterFunction func(*Request, *Response, *FilterChain)
+
+// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching
+// See examples/restful-no-cache-filter.go for usage
+func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) {
+ resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
+ resp.Header().Set("Pragma", "no-cache") // HTTP 1.0.
+ resp.Header().Set("Expires", "0") // Proxies.
+ chain.ProcessFilter(req, resp)
+}
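
A minimal sketch of a container-level FilterFunction that logs each request before handing control to the rest of the chain; the globalLogging name is an illustrative assumption:

    package main

    import (
        "log"
        "net/http"

        restful "github.com/emicklei/go-restful"
    )

    // globalLogging logs the method and URL, then passes control to the next filter or the RouteFunction.
    func globalLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
        log.Printf("%s %s", req.Request.Method, req.Request.URL)
        chain.ProcessFilter(req, resp)
    }

    func main() {
        restful.Filter(globalLogging) // install on the DefaultContainer
        http.ListenAndServe(":8080", nil)
    }
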
diff --git a/vendor/github.com/emicklei/go-restful/json.go b/vendor/github.com/emicklei/go-restful/json.go
new file mode 100644
index 0000000..8711651
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/json.go
@@ -0,0 +1,11 @@
+// +build !jsoniter
+
+package restful
+
+import "encoding/json"
+
+var (
+ MarshalIndent = json.MarshalIndent
+ NewDecoder = json.NewDecoder
+ NewEncoder = json.NewEncoder
+)
diff --git a/vendor/github.com/emicklei/go-restful/jsoniter.go b/vendor/github.com/emicklei/go-restful/jsoniter.go
new file mode 100644
index 0000000..11b8f8a
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/jsoniter.go
@@ -0,0 +1,12 @@
+// +build jsoniter
+
+package restful
+
+import "github.com/json-iterator/go"
+
+var (
+ json = jsoniter.ConfigCompatibleWithStandardLibrary
+ MarshalIndent = json.MarshalIndent
+ NewDecoder = json.NewDecoder
+ NewEncoder = json.NewEncoder
+)
diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go
new file mode 100644
index 0000000..3ede189
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/jsr311.go
@@ -0,0 +1,297 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "sort"
+)
+
+// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
+// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
+// RouterJSR311 implements the Router interface.
+// Concept of locators is not implemented.
+type RouterJSR311 struct{}
+
+// SelectRoute is part of the Router interface and returns the best match
+// for the WebService and its Route for the given Request.
+func (r RouterJSR311) SelectRoute(
+ webServices []*WebService,
+ httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {
+
+ // Identify the root resource class (WebService)
+ dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
+ if err != nil {
+ return nil, nil, NewError(http.StatusNotFound, "")
+ }
+ // Obtain the set of candidate methods (Routes)
+ routes := r.selectRoutes(dispatcher, finalMatch)
+ if len(routes) == 0 {
+ return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+ }
+
+ // Identify the method (Route) that will handle the request
+ route, ok := r.detectRoute(routes, httpRequest)
+ return dispatcher, route, ok
+}
+
+// ExtractParameters is used to obtain the path parameters from the route using the same matching
+// engine as the JSR 311 router.
+func (r RouterJSR311) ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string {
+ webServiceExpr := webService.pathExpr
+ webServiceMatches := webServiceExpr.Matcher.FindStringSubmatch(urlPath)
+ pathParameters := r.extractParams(webServiceExpr, webServiceMatches)
+ routeExpr := route.pathExpr
+ routeMatches := routeExpr.Matcher.FindStringSubmatch(webServiceMatches[len(webServiceMatches)-1])
+ routeParams := r.extractParams(routeExpr, routeMatches)
+ for key, value := range routeParams {
+ pathParameters[key] = value
+ }
+ return pathParameters
+}
+
+func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) map[string]string {
+ params := map[string]string{}
+ for i := 1; i < len(matches); i++ {
+ if len(pathExpr.VarNames) >= i {
+ params[pathExpr.VarNames[i-1]] = matches[i]
+ }
+ }
+ return params
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
+ candidates := make([]*Route, 0, 8)
+ for i, each := range routes {
+ ok := true
+ for _, fn := range each.If {
+ if !fn(httpRequest) {
+ ok = false
+ break
+ }
+ }
+ if ok {
+ candidates = append(candidates, &routes[i])
+ }
+ }
+ if len(candidates) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes))
+ }
+ return nil, NewError(http.StatusNotFound, "404: Not Found")
+ }
+
+ // http method
+ previous := candidates
+ candidates = candidates[:0]
+ for _, each := range previous {
+ if httpRequest.Method == each.Method {
+ candidates = append(candidates, each)
+ }
+ }
+ if len(candidates) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method)
+ }
+ return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
+ }
+
+ // content-type
+ contentType := httpRequest.Header.Get(HEADER_ContentType)
+ previous = candidates
+ candidates = candidates[:0]
+ for _, each := range previous {
+ if each.matchesContentType(contentType) {
+ candidates = append(candidates, each)
+ }
+ }
+ if len(candidates) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
+ }
+ if httpRequest.ContentLength > 0 {
+ return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
+ }
+ }
+
+ // accept
+ previous = candidates
+ candidates = candidates[:0]
+ accept := httpRequest.Header.Get(HEADER_Accept)
+ if len(accept) == 0 {
+ accept = "*/*"
+ }
+ for _, each := range previous {
+ if each.matchesAccept(accept) {
+ candidates = append(candidates, each)
+ }
+ }
+ if len(candidates) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept)
+ }
+ return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
+ }
+ // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
+ return candidates[0], nil
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+// n/m > n/* > */*
+func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
+ // TODO
+ return &routes[0]
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2)
+func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
+ filtered := &sortableRouteCandidates{}
+ for _, each := range dispatcher.Routes() {
+ pathExpr := each.pathExpr
+ matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
+ if matches != nil {
+ lastMatch := matches[len(matches)-1]
+ if len(lastMatch) == 0 || lastMatch == "/" { // include only when the route remainder is empty or "/"
+ filtered.candidates = append(filtered.candidates,
+ routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
+ }
+ }
+ }
+ if len(filtered.candidates) == 0 {
+ if trace {
+ traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
+ }
+ return []Route{}
+ }
+ sort.Sort(sort.Reverse(filtered))
+
+ // select other routes from candidates whose expression matches the path remainder
+ matchingRoutes := []Route{filtered.candidates[0].route}
+ for c := 1; c < len(filtered.candidates); c++ {
+ each := filtered.candidates[c]
+ if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
+ matchingRoutes = append(matchingRoutes, each.route)
+ }
+ }
+ return matchingRoutes
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
+func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
+ filtered := &sortableDispatcherCandidates{}
+ for _, each := range dispatchers {
+ matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
+ if matches != nil {
+ filtered.candidates = append(filtered.candidates,
+ dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
+ }
+ }
+ if len(filtered.candidates) == 0 {
+ if trace {
+ traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
+ }
+ return nil, "", errors.New("not found")
+ }
+ sort.Sort(sort.Reverse(filtered))
+ return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
+}
+
+// Types and functions to support the sorting of Routes
+
+type routeCandidate struct {
+ route Route
+ matchesCount int // the number of capturing groups
+ literalCount int // the number of literal characters (means those not resulting from template variable substitution)
+ nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’)
+}
+
+func (r routeCandidate) expressionToMatch() string {
+ return r.route.pathExpr.Source
+}
+
+func (r routeCandidate) String() string {
+ return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
+}
+
+type sortableRouteCandidates struct {
+ candidates []routeCandidate
+}
+
+func (rcs *sortableRouteCandidates) Len() int {
+ return len(rcs.candidates)
+}
+func (rcs *sortableRouteCandidates) Swap(i, j int) {
+ rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
+}
+func (rcs *sortableRouteCandidates) Less(i, j int) bool {
+ ci := rcs.candidates[i]
+ cj := rcs.candidates[j]
+ // primary key
+ if ci.literalCount < cj.literalCount {
+ return true
+ }
+ if ci.literalCount > cj.literalCount {
+ return false
+ }
+ // secondary key
+ if ci.matchesCount < cj.matchesCount {
+ return true
+ }
+ if ci.matchesCount > cj.matchesCount {
+ return false
+ }
+ // tertiary key
+ if ci.nonDefaultCount < cj.nonDefaultCount {
+ return true
+ }
+ if ci.nonDefaultCount > cj.nonDefaultCount {
+ return false
+ }
+ // quaternary key ("source" is interpreted as Path)
+ return ci.route.Path < cj.route.Path
+}
+
+// Types and functions to support the sorting of Dispatchers
+
+type dispatcherCandidate struct {
+ dispatcher *WebService
+ finalMatch string
+ matchesCount int // the number of capturing groups
+ literalCount int // the number of literal characters (means those not resulting from template variable substitution)
+ nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’)
+}
+type sortableDispatcherCandidates struct {
+ candidates []dispatcherCandidate
+}
+
+func (dc *sortableDispatcherCandidates) Len() int {
+ return len(dc.candidates)
+}
+func (dc *sortableDispatcherCandidates) Swap(i, j int) {
+ dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
+}
+func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
+ ci := dc.candidates[i]
+ cj := dc.candidates[j]
+ // primary key
+ if ci.matchesCount < cj.matchesCount {
+ return true
+ }
+ if ci.matchesCount > cj.matchesCount {
+ return false
+ }
+ // secondary key
+ if ci.literalCount < cj.literalCount {
+ return true
+ }
+ if ci.literalCount > cj.literalCount {
+ return false
+ }
+ // tertiary key
+ return ci.nonDefaultCount < cj.nonDefaultCount
+}
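
Containers use the CurlyRouter by default; a one-line sketch of opting into the JSR311 matching flow instead, applied here to the DefaultContainer:

    package main

    import restful "github.com/emicklei/go-restful"

    func main() {
        // replace the default CurlyRouter with the JSR311 route selector
        restful.DefaultContainer.Router(restful.RouterJSR311{})
    }
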
diff --git a/vendor/github.com/emicklei/go-restful/log/log.go b/vendor/github.com/emicklei/go-restful/log/log.go
new file mode 100644
index 0000000..6cd44c7
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/log/log.go
@@ -0,0 +1,34 @@
+package log
+
+import (
+ stdlog "log"
+ "os"
+)
+
+// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
+type StdLogger interface {
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+}
+
+var Logger StdLogger
+
+func init() {
+ // default Logger
+ SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
+}
+
+// SetLogger sets the logger for this package
+func SetLogger(customLogger StdLogger) {
+ Logger = customLogger
+}
+
+// Print delegates to the Logger
+func Print(v ...interface{}) {
+ Logger.Print(v...)
+}
+
+// Printf delegates to the Logger
+func Printf(format string, v ...interface{}) {
+ Logger.Printf(format, v...)
+}
diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/logger.go
new file mode 100644
index 0000000..6595df0
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/logger.go
@@ -0,0 +1,32 @@
+package restful
+
+// Copyright 2014 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+import (
+ "github.com/emicklei/go-restful/log"
+)
+
+var trace bool = false
+var traceLogger log.StdLogger
+
+func init() {
+ traceLogger = log.Logger // use the package logger by default
+}
+
+// TraceLogger enables detailed logging of Http request matching and filter invocation. By default no trace logger is set.
+// You may call EnableTracing() directly to enable trace logging to the package-wide logger.
+func TraceLogger(logger log.StdLogger) {
+ traceLogger = logger
+ EnableTracing(logger != nil)
+}
+
+// SetLogger exposes the setter for the global logger on the top-level package
+func SetLogger(customLogger log.StdLogger) {
+ log.SetLogger(customLogger)
+}
+
+// EnableTracing can be used to turn trace logging on and off.
+func EnableTracing(enabled bool) {
+ trace = enabled
+}
diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/mime.go
new file mode 100644
index 0000000..3301447
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/mime.go
@@ -0,0 +1,50 @@
+package restful
+
+import (
+ "strconv"
+ "strings"
+)
+
+type mime struct {
+ media string
+ quality float64
+}
+
+// insertMime adds a mime to a list and keeps it sorted by quality.
+func insertMime(l []mime, e mime) []mime {
+ for i, each := range l {
+ // if the current mime has lower quality than e, insert e before it
+ if e.quality > each.quality {
+ left := append([]mime{}, l[0:i]...)
+ return append(append(left, e), l[i:]...)
+ }
+ }
+ return append(l, e)
+}
+
+const qFactorWeightingKey = "q"
+
+// sortedMimes returns a list of mimes sorted (descending) by their specified quality.
+// e.g. text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3
+func sortedMimes(accept string) (sorted []mime) {
+ for _, each := range strings.Split(accept, ",") {
+ typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
+ if len(typeAndQuality) == 1 {
+ sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
+ } else {
+ // take factor
+ qAndWeight := strings.Split(typeAndQuality[1], "=")
+ if len(qAndWeight) == 2 && strings.Trim(qAndWeight[0], " ") == qFactorWeightingKey {
+ f, err := strconv.ParseFloat(qAndWeight[1], 64)
+ if err != nil {
+ traceLogger.Printf("unable to parse quality in %s, %v", each, err)
+ } else {
+ sorted = insertMime(sorted, mime{typeAndQuality[0], f})
+ }
+ } else {
+ sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
+ }
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/options_filter.go
new file mode 100644
index 0000000..5c1b342
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/options_filter.go
@@ -0,0 +1,34 @@
+package restful
+
+import "strings"
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
+// and provides the response with a set of allowed methods for the request URL Path.
+// As for any filter, you can also install it for a particular WebService within a Container.
+// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
+func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
+ if "OPTIONS" != req.Request.Method {
+ chain.ProcessFilter(req, resp)
+ return
+ }
+
+ archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
+ methods := strings.Join(c.computeAllowedMethods(req), ",")
+ origin := req.Request.Header.Get(HEADER_Origin)
+
+ resp.AddHeader(HEADER_Allow, methods)
+ resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
+ resp.AddHeader(HEADER_AccessControlAllowHeaders, archs)
+ resp.AddHeader(HEADER_AccessControlAllowMethods, methods)
+}
+
+// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
+// and provides the response with a set of allowed methods for the request URL Path.
+// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
+func OPTIONSFilter() FilterFunction {
+ return DefaultContainer.OPTIONSFilter
+}
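
A minimal sketch of enabling automatic OPTIONS answers for all routes registered on the default container:

    package main

    import (
        "net/http"

        restful "github.com/emicklei/go-restful"
    )

    func main() {
        // OPTIONSFilter() returns a FilterFunction bound to the DefaultContainer
        restful.Filter(restful.OPTIONSFilter())
        http.ListenAndServe(":8080", nil)
    }
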
diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/parameter.go
new file mode 100644
index 0000000..e879330
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/parameter.go
@@ -0,0 +1,143 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+const (
+ // PathParameterKind = indicator of Request parameter type "path"
+ PathParameterKind = iota
+
+ // QueryParameterKind = indicator of Request parameter type "query"
+ QueryParameterKind
+
+ // BodyParameterKind = indicator of Request parameter type "body"
+ BodyParameterKind
+
+ // HeaderParameterKind = indicator of Request parameter type "header"
+ HeaderParameterKind
+
+ // FormParameterKind = indicator of Request parameter type "form"
+ FormParameterKind
+
+ // CollectionFormatCSV comma separated values `foo,bar`
+ CollectionFormatCSV = CollectionFormat("csv")
+
+ // CollectionFormatSSV space separated values `foo bar`
+ CollectionFormatSSV = CollectionFormat("ssv")
+
+ // CollectionFormatTSV tab separated values `foo\tbar`
+ CollectionFormatTSV = CollectionFormat("tsv")
+
+ // CollectionFormatPipes pipe separated values `foo|bar`
+ CollectionFormatPipes = CollectionFormat("pipes")
+
+ // CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single
+ // instance `foo=bar&foo=baz`. This is valid only for QueryParameters and FormParameters
+ CollectionFormatMulti = CollectionFormat("multi")
+)
+
+type CollectionFormat string
+
+func (cf CollectionFormat) String() string {
+ return string(cf)
+}
+
+// Parameter is for documenting the parameter used in a Http Request
+// ParameterData kinds are Path, Query and Body
+type Parameter struct {
+ data *ParameterData
+}
+
+// ParameterData represents the state of a Parameter.
+// It is made public to make it accessible to e.g. the Swagger package.
+type ParameterData struct {
+ Name, Description, DataType, DataFormat string
+ Kind int
+ Required bool
+ AllowableValues map[string]string
+ AllowMultiple bool
+ DefaultValue string
+ CollectionFormat string
+}
+
+// Data returns the state of the Parameter
+func (p *Parameter) Data() ParameterData {
+ return *p.data
+}
+
+// Kind returns the parameter type indicator (see const for valid values)
+func (p *Parameter) Kind() int {
+ return p.data.Kind
+}
+
+func (p *Parameter) bePath() *Parameter {
+ p.data.Kind = PathParameterKind
+ return p
+}
+func (p *Parameter) beQuery() *Parameter {
+ p.data.Kind = QueryParameterKind
+ return p
+}
+func (p *Parameter) beBody() *Parameter {
+ p.data.Kind = BodyParameterKind
+ return p
+}
+
+func (p *Parameter) beHeader() *Parameter {
+ p.data.Kind = HeaderParameterKind
+ return p
+}
+
+func (p *Parameter) beForm() *Parameter {
+ p.data.Kind = FormParameterKind
+ return p
+}
+
+// Required sets the required field and returns the receiver
+func (p *Parameter) Required(required bool) *Parameter {
+ p.data.Required = required
+ return p
+}
+
+// AllowMultiple sets the allowMultiple field and returns the receiver
+func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
+ p.data.AllowMultiple = multiple
+ return p
+}
+
+// AllowableValues sets the allowableValues field and returns the receiver
+func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
+ p.data.AllowableValues = values
+ return p
+}
+
+// DataType sets the dataType field and returns the receiver
+func (p *Parameter) DataType(typeName string) *Parameter {
+ p.data.DataType = typeName
+ return p
+}
+
+// DataFormat sets the dataFormat field for Swagger UI
+func (p *Parameter) DataFormat(formatName string) *Parameter {
+ p.data.DataFormat = formatName
+ return p
+}
+
+// DefaultValue sets the default value field and returns the receiver
+func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
+ p.data.DefaultValue = stringRepresentation
+ return p
+}
+
+// Description sets the description value field and returns the receiver
+func (p *Parameter) Description(doc string) *Parameter {
+ p.data.Description = doc
+ return p
+}
+
+// CollectionFormat sets the collection format for an array type
+func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter {
+ p.data.CollectionFormat = format.String()
+ return p
+}
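+
+// A minimal sketch of documenting a query parameter with the fluent setters,
+// using the package-level QueryParameter constructor from web_service.go:
+//
+//	p := restful.QueryParameter("page", "page number to return").
+//		DataType("integer").
+//		DefaultValue("1").
+//		Required(false)
+//	// p.Kind() == restful.QueryParameterKind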
diff --git a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/path_expression.go
new file mode 100644
index 0000000..95a9a25
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/path_expression.go
@@ -0,0 +1,74 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// PathExpression holds a compiled path expression (RegExp) needed to match against
+// Http request paths and to extract path parameter values.
+type pathExpression struct {
+ LiteralCount int // the number of literal characters (means those not resulting from template variable substitution)
+ VarNames []string // the names of parameters (enclosed by {}) in the path
+ VarCount int // the number of named parameters (enclosed by {}) in the path
+ Matcher *regexp.Regexp
+ Source string // Path as defined by the RouteBuilder
+ tokens []string
+}
+
+// NewPathExpression creates a PathExpression from the input URL path.
+// Returns an error if the path is invalid.
+func newPathExpression(path string) (*pathExpression, error) {
+ expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path)
+ compiled, err := regexp.Compile(expression)
+ if err != nil {
+ return nil, err
+ }
+ return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
+func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) {
+ var buffer bytes.Buffer
+ buffer.WriteString("^")
+ //tokens = strings.Split(template, "/")
+ tokens = tokenizePath(template)
+ for _, each := range tokens {
+ if each == "" {
+ continue
+ }
+ buffer.WriteString("/")
+ if strings.HasPrefix(each, "{") {
+ // check for regular expression in variable
+ colon := strings.Index(each, ":")
+ var varName string
+ if colon != -1 {
+ // extract expression
+ varName = strings.TrimSpace(each[1:colon])
+ paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
+ if paramExpr == "*" { // special case
+ buffer.WriteString("(.*)")
+ } else {
+ buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
+ }
+ } else {
+ // plain var
+ varName = strings.TrimSpace(each[1 : len(each)-1])
+ buffer.WriteString("([^/]+?)")
+ }
+ varNames = append(varNames, varName)
+ varCount += 1
+ } else {
+ literalCount += len(each)
+ encoded := each // TODO URI encode
+ buffer.WriteString(regexp.QuoteMeta(encoded))
+ }
+ }
+ return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens
+}
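+
+// Small worked examples of the translation above:
+//
+//	templateToRegularExpression("/users/{id}")
+//	// expression "^/users/([^/]+?)(/.*)?$", varNames ["id"], literalCount 5
+//
+//	templateToRegularExpression("/static/{path:*}")
+//	// expression "^/static/(.*)(/.*)?$", varNames ["path"]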
diff --git a/vendor/github.com/emicklei/go-restful/path_processor.go b/vendor/github.com/emicklei/go-restful/path_processor.go
new file mode 100644
index 0000000..357c723
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/path_processor.go
@@ -0,0 +1,63 @@
+package restful
+
+import (
+ "bytes"
+ "strings"
+)
+
+// Copyright 2018 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path.
+// If a Router does not implement this interface then the default behaviour will be used.
+type PathProcessor interface {
+ // ExtractParameters gets the path parameters defined in the route and webService from the urlPath
+ ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string
+}
+
+type defaultPathProcessor struct{}
+
+// Extract the parameters from the request url path
+func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath string) map[string]string {
+ urlParts := tokenizePath(urlPath)
+ pathParameters := map[string]string{}
+ for i, key := range r.pathParts {
+ var value string
+ if i >= len(urlParts) {
+ value = ""
+ } else {
+ value = urlParts[i]
+ }
+ if strings.HasPrefix(key, "{") { // path-parameter
+ if colon := strings.Index(key, ":"); colon != -1 {
+ // extract by regex
+ regPart := key[colon+1 : len(key)-1]
+ keyPart := key[1:colon]
+ if regPart == "*" {
+ pathParameters[keyPart] = untokenizePath(i, urlParts)
+ break
+ } else {
+ pathParameters[keyPart] = value
+ }
+ } else {
+ // without enclosing {}
+ pathParameters[key[1:len(key)-1]] = value
+ }
+ }
+ }
+ return pathParameters
+}
+
+// Untokenize back into a URL path using the slash separator
+func untokenizePath(offset int, parts []string) string {
+ var buffer bytes.Buffer
+ for p := offset; p < len(parts); p++ {
+ buffer.WriteString(parts[p])
+ // do not end
+ if p < len(parts)-1 {
+ buffer.WriteString("/")
+ }
+ }
+ return buffer.String()
+}
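+
+// A small worked example, assuming a Route whose path template is "/users/{id}"
+// (so its pathParts have been populated by postBuild):
+//
+//	defaultPathProcessor{}.ExtractParameters(route, ws, "/users/42")
+//	// -> map[string]string{"id": "42"}
+//
+// With a wildcard template such as "/static/{path:*}" and the url "/static/css/main.css",
+// the result would be map[string]string{"path": "css/main.css"}.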
diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go
new file mode 100644
index 0000000..a20730f
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/request.go
@@ -0,0 +1,118 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "compress/zlib"
+ "net/http"
+)
+
+var defaultRequestContentType string
+
+// Request is a wrapper for a http Request that provides convenience methods
+type Request struct {
+ Request *http.Request
+ pathParameters map[string]string
+ attributes map[string]interface{} // for storing request-scoped values
+ selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
+}
+
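+// NewRequest creates a Request that wraps the given http.Request and starts with
+// empty path parameters and attributes.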
+func NewRequest(httpRequest *http.Request) *Request {
+ return &Request{
+ Request: httpRequest,
+ pathParameters: map[string]string{},
+ attributes: map[string]interface{}{},
+ } // empty parameters, attributes
+}
+
+// If ContentType is missing or */* is given then fall back to this type, otherwise
+// a "Unable to unmarshal content of type:" response is returned.
+// Valid values are restful.MIME_JSON and restful.MIME_XML
+// Example:
+// restful.DefaultRequestContentType(restful.MIME_JSON)
+func DefaultRequestContentType(mime string) {
+ defaultRequestContentType = mime
+}
+
+// PathParameter accesses the Path parameter value by its name
+func (r *Request) PathParameter(name string) string {
+ return r.pathParameters[name]
+}
+
+// PathParameters accesses the Path parameter values
+func (r *Request) PathParameters() map[string]string {
+ return r.pathParameters
+}
+
+// QueryParameter returns the (first) Query parameter value by its name
+func (r *Request) QueryParameter(name string) string {
+ return r.Request.FormValue(name)
+}
+
+// QueryParameters returns all the query parameter values by name
+func (r *Request) QueryParameters(name string) []string {
+ return r.Request.URL.Query()[name]
+}
+
+// BodyParameter parses the body of the request (once, typically for a POST or a PUT) and returns the value of the given name or an error.
+func (r *Request) BodyParameter(name string) (string, error) {
+ err := r.Request.ParseForm()
+ if err != nil {
+ return "", err
+ }
+ return r.Request.PostFormValue(name), nil
+}
+
+// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
+func (r *Request) HeaderParameter(name string) string {
+ return r.Request.Header.Get(name)
+}
+
+// ReadEntity checks the Content-Type header and reads the content into the entityPointer.
+func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
+ contentType := r.Request.Header.Get(HEADER_ContentType)
+ contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
+
+ // check if the request body needs decompression
+ if ENCODING_GZIP == contentEncoding {
+ gzipReader := currentCompressorProvider.AcquireGzipReader()
+ defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
+ gzipReader.Reset(r.Request.Body)
+ r.Request.Body = gzipReader
+ } else if ENCODING_DEFLATE == contentEncoding {
+ zlibReader, err := zlib.NewReader(r.Request.Body)
+ if err != nil {
+ return err
+ }
+ r.Request.Body = zlibReader
+ }
+
+ // lookup the EntityReader, use defaultRequestContentType if needed and provided
+ entityReader, ok := entityAccessRegistry.accessorAt(contentType)
+ if !ok {
+ if len(defaultRequestContentType) != 0 {
+ entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType)
+ }
+ if !ok {
+ return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
+ }
+ }
+ return entityReader.Read(r, entityPointer)
+}
+
+// SetAttribute adds or replaces the attribute with the given value.
+func (r *Request) SetAttribute(name string, value interface{}) {
+ r.attributes[name] = value
+}
+
+// Attribute returns the value associated to the given name. Returns nil if absent.
+func (r Request) Attribute(name string) interface{} {
+ return r.attributes[name]
+}
+
+// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees
+func (r Request) SelectedRoutePath() string {
+ return r.selectedRoutePath
+}
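+
+// A minimal sketch of a RouteFunction that reads the request body, assuming a
+// hypothetical User struct with JSON tags:
+//
+//	func readUser(req *restful.Request, resp *restful.Response) {
+//		user := new(User)
+//		if err := req.ReadEntity(user); err != nil {
+//			resp.WriteError(http.StatusBadRequest, err)
+//			return
+//		}
+//		resp.WriteEntity(user)
+//	}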
diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go
new file mode 100644
index 0000000..fbb48f2
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/response.go
@@ -0,0 +1,255 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bufio"
+ "errors"
+ "net"
+ "net/http"
+)
+
+// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
+var DefaultResponseMimeType string
+
+// PrettyPrintResponses controls the indentation feature of XML and JSON serialization
+var PrettyPrintResponses = true
+
+// Response is a wrapper on the actual http ResponseWriter
+// It provides several convenience methods to prepare and write response content.
+type Response struct {
+ http.ResponseWriter
+ requestAccept string // mime-type what the Http Request says it wants to receive
+ routeProduces []string // mime-types what the Route says it can produce
+ statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200)
+ contentLength int // number of bytes written for the response body
+ prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
+ err error // err property is kept when WriteError is called
+ hijacker http.Hijacker // if underlying ResponseWriter supports it
+}
+
+// NewResponse creates a new response based on a http ResponseWriter.
+func NewResponse(httpWriter http.ResponseWriter) *Response {
+ hijacker, _ := httpWriter.(http.Hijacker)
+ return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker}
+}
+
+// DefaultResponseContentType sets a default.
+// If Accept header matching fails, fall back to this type.
+// Valid values are restful.MIME_JSON and restful.MIME_XML
+// Example:
+// restful.DefaultResponseContentType(restful.MIME_JSON)
+func DefaultResponseContentType(mime string) {
+ DefaultResponseMimeType = mime
+}
+
+// InternalServerError writes the StatusInternalServerError header.
+// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
+func (r Response) InternalServerError() Response {
+ r.WriteHeader(http.StatusInternalServerError)
+ return r
+}
+
+// Hijack implements the http.Hijacker interface. This expands
+// the Response to fulfill http.Hijacker if the underlying
+// http.ResponseWriter supports it.
+func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ if r.hijacker == nil {
+ return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter")
+ }
+ return r.hijacker.Hijack()
+}
+
+// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
+func (r *Response) PrettyPrint(bePretty bool) {
+ r.prettyPrint = bePretty
+}
+
+// AddHeader is a shortcut for .Header().Add(header,value)
+func (r Response) AddHeader(header string, value string) Response {
+ r.Header().Add(header, value)
+ return r
+}
+
+// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
+func (r *Response) SetRequestAccepts(mime string) {
+ r.requestAccept = mime
+}
+
+// EntityWriter returns the registered EntityWriter that the entity (requested resource)
+// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
+// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
+func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
+ sorted := sortedMimes(r.requestAccept)
+ for _, eachAccept := range sorted {
+ for _, eachProduce := range r.routeProduces {
+ if eachProduce == eachAccept.media {
+ if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
+ return w, true
+ }
+ }
+ }
+ if eachAccept.media == "*/*" {
+ for _, each := range r.routeProduces {
+ if w, ok := entityAccessRegistry.accessorAt(each); ok {
+ return w, true
+ }
+ }
+ }
+ }
+ // if requestAccept is empty
+ writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
+ if !ok {
+ // if not registered then fallback to the defaults (if set)
+ if DefaultResponseMimeType == MIME_JSON {
+ return entityAccessRegistry.accessorAt(MIME_JSON)
+ }
+ if DefaultResponseMimeType == MIME_XML {
+ return entityAccessRegistry.accessorAt(MIME_XML)
+ }
+ // Fallback to whatever the route says it can produce.
+ // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+ for _, each := range r.routeProduces {
+ if w, ok := entityAccessRegistry.accessorAt(each); ok {
+ return w, true
+ }
+ }
+ if trace {
+ traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
+ }
+ }
+ return writer, ok
+}
+
+// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
+func (r *Response) WriteEntity(value interface{}) error {
+ return r.WriteHeaderAndEntity(http.StatusOK, value)
+}
+
+// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
+// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
+// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
+// If the value is nil then no response is sent except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
+// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
+// Current implementation ignores any q-parameters in the Accept Header.
+// Returns an error if the value could not be written on the response.
+func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
+ writer, ok := r.EntityWriter()
+ if !ok {
+ r.WriteHeader(http.StatusNotAcceptable)
+ return nil
+ }
+ return writer.Write(r, status, value)
+}
+
+// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
+// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteAsXml(value interface{}) error {
+ return writeXML(r, http.StatusOK, MIME_XML, value)
+}
+
+// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
+// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
+ return writeXML(r, status, MIME_XML, value)
+}
+
+// WriteAsJson is a convenience method for writing a value in json.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteAsJson(value interface{}) error {
+ return writeJSON(r, http.StatusOK, MIME_JSON, value)
+}
+
+// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteJson(value interface{}, contentType string) error {
+ return writeJSON(r, http.StatusOK, contentType, value)
+}
+
+// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
+ return writeJSON(r, status, contentType, value)
+}
+
+// WriteError writes the http status and the error string on the response. err can be nil.
+func (r *Response) WriteError(httpStatus int, err error) error {
+ r.err = err
+ if err == nil {
+ r.WriteErrorString(httpStatus, "")
+ } else {
+ r.WriteErrorString(httpStatus, err.Error())
+ }
+ return err
+}
+
+// WriteServiceError is a convenience method for responding with a status and a ServiceError
+func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
+ r.err = err
+ return r.WriteHeaderAndEntity(httpStatus, err)
+}
+
+// WriteErrorString is a convenience method for an error status with the actual error
+func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
+ if r.err == nil {
+ // if not called from WriteError
+ r.err = errors.New(errorReason)
+ }
+ r.WriteHeader(httpStatus)
+ if _, err := r.Write([]byte(errorReason)); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Flush implements http.Flusher interface, which sends any buffered data to the client.
+func (r *Response) Flush() {
+ if f, ok := r.ResponseWriter.(http.Flusher); ok {
+ f.Flush()
+ } else if trace {
+ traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
+ }
+}
+
+// WriteHeader is overridden to remember the Status Code that has been written.
+// Changes to the Header of the response have no effect after this.
+func (r *Response) WriteHeader(httpStatus int) {
+ r.statusCode = httpStatus
+ r.ResponseWriter.WriteHeader(httpStatus)
+}
+
+// StatusCode returns the code that has been written using WriteHeader.
+func (r Response) StatusCode() int {
+ if 0 == r.statusCode {
+ // no status code has been written yet; assume OK
+ return http.StatusOK
+ }
+ return r.statusCode
+}
+
+// Write writes the data to the connection as part of an HTTP reply.
+// Write is part of http.ResponseWriter interface.
+func (r *Response) Write(bytes []byte) (int, error) {
+ written, err := r.ResponseWriter.Write(bytes)
+ r.contentLength += written
+ return written, err
+}
+
+// ContentLength returns the number of bytes written for the response content.
+// Note that this value is only correct if all data is written through the Response using its Write* methods.
+// Data written directly using the underlying http.ResponseWriter is not accounted for.
+func (r Response) ContentLength() int {
+ return r.contentLength
+}
+
+// CloseNotify is part of http.CloseNotifier interface
+func (r Response) CloseNotify() <-chan bool {
+ return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+// Error returns the err created by WriteError
+func (r Response) Error() error {
+ return r.err
+}
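+
+// A minimal sketch of a filter that uses the recorded status code and content
+// length after the chain has run (logFilter is a hypothetical example):
+//
+//	func logFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
+//		chain.ProcessFilter(req, resp)
+//		log.Printf("%s %s -> %d (%d bytes)",
+//			req.Request.Method, req.Request.URL.Path, resp.StatusCode(), resp.ContentLength())
+//	}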
diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go
new file mode 100644
index 0000000..3385cfc
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/route.go
@@ -0,0 +1,159 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "net/http"
+ "strings"
+)
+
+// RouteFunction declares the signature of a function that can be bound to a Route.
+type RouteFunction func(*Request, *Response)
+
+// RouteSelectionConditionFunction declares the signature of a function that
+// can be used to add extra conditional logic when selecting whether the route
+// matches the HTTP request.
+type RouteSelectionConditionFunction func(httpRequest *http.Request) bool
+
+// Route binds an HTTP Method, Path, Consumes combination to a RouteFunction.
+type Route struct {
+ Method string
+ Produces []string
+ Consumes []string
+ Path string // webservice root path + described path
+ Function RouteFunction
+ Filters []FilterFunction
+ If []RouteSelectionConditionFunction
+
+ // cached values for dispatching
+ relativePath string
+ pathParts []string
+ pathExpr *pathExpression // cached compilation of relativePath as RegExp
+
+ // documentation
+ Doc string
+ Notes string
+ Operation string
+ ParameterDocs []*Parameter
+ ResponseErrors map[int]ResponseError
+ DefaultResponse *ResponseError
+ ReadSample, WriteSample interface{} // structs that model an example request or response payload
+
+ // Extra information used to store custom information about the route.
+ Metadata map[string]interface{}
+
+ // marks a route as deprecated
+ Deprecated bool
+
+	// Overrides the container.contentEncodingEnabled
+ contentEncodingEnabled *bool
+}
+
+// Initialize for Route
+func (r *Route) postBuild() {
+ r.pathParts = tokenizePath(r.Path)
+}
+
+// Create Request and Response from their http versions
+func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) {
+ wrappedRequest := NewRequest(httpRequest)
+ wrappedRequest.pathParameters = pathParams
+ wrappedRequest.selectedRoutePath = r.Path
+ wrappedResponse := NewResponse(httpWriter)
+ wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
+ wrappedResponse.routeProduces = r.Produces
+ return wrappedRequest, wrappedResponse
+}
+
+func stringTrimSpaceCutset(r rune) bool {
+ return r == ' '
+}
+
+// Return whether the mimeType matches what this Route can produce.
+func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
+ remaining := mimeTypesWithQuality
+ for {
+ var mimeType string
+ if end := strings.Index(remaining, ","); end == -1 {
+ mimeType, remaining = remaining, ""
+ } else {
+ mimeType, remaining = remaining[:end], remaining[end+1:]
+ }
+ if quality := strings.Index(mimeType, ";"); quality != -1 {
+ mimeType = mimeType[:quality]
+ }
+ mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset)
+ if mimeType == "*/*" {
+ return true
+ }
+ for _, producibleType := range r.Produces {
+ if producibleType == "*/*" || producibleType == mimeType {
+ return true
+ }
+ }
+ if len(remaining) == 0 {
+ return false
+ }
+ }
+}
+
+// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
+func (r Route) matchesContentType(mimeTypes string) bool {
+
+ if len(r.Consumes) == 0 {
+ // did not specify what it can consume ; any media type (“*/*”) is assumed
+ return true
+ }
+
+ if len(mimeTypes) == 0 {
+ // idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type
+ m := r.Method
+ if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
+ return true
+ }
+ // proceed with default
+ mimeTypes = MIME_OCTET
+ }
+
+ remaining := mimeTypes
+ for {
+ var mimeType string
+ if end := strings.Index(remaining, ","); end == -1 {
+ mimeType, remaining = remaining, ""
+ } else {
+ mimeType, remaining = remaining[:end], remaining[end+1:]
+ }
+ if quality := strings.Index(mimeType, ";"); quality != -1 {
+ mimeType = mimeType[:quality]
+ }
+ mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset)
+ for _, consumeableType := range r.Consumes {
+ if consumeableType == "*/*" || consumeableType == mimeType {
+ return true
+ }
+ }
+ if len(remaining) == 0 {
+ return false
+ }
+ }
+}
+
+// Tokenize a URL path using the slash separator ; the result does not have empty tokens
+func tokenizePath(path string) []string {
+ if "/" == path {
+ return nil
+ }
+ return strings.Split(strings.Trim(path, "/"), "/")
+}
+
+// for debugging
+func (r Route) String() string {
+ return r.Method + " " + r.Path
+}
+
+// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value.
+func (r Route) EnableContentEncoding(enabled bool) {
+ r.contentEncodingEnabled = &enabled
+}
diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go
new file mode 100644
index 0000000..0fccf61
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/route_builder.go
@@ -0,0 +1,326 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync/atomic"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+// RouteBuilder is a helper to construct Routes.
+type RouteBuilder struct {
+ rootPath string
+ currentPath string
+ produces []string
+ consumes []string
+ httpMethod string // required
+ function RouteFunction // required
+ filters []FilterFunction
+ conditions []RouteSelectionConditionFunction
+
+ typeNameHandleFunc TypeNameHandleFunction // required
+
+ // documentation
+ doc string
+ notes string
+ operation string
+ readSample, writeSample interface{}
+ parameters []*Parameter
+ errorMap map[int]ResponseError
+ defaultResponse *ResponseError
+ metadata map[string]interface{}
+ deprecated bool
+ contentEncodingEnabled *bool
+}
+
+// Do evaluates each argument with the RouteBuilder itself.
+// This allows you to follow DRY principles without breaking the fluent programming style.
+// Example:
+// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
+//
+// func Returns500(b *RouteBuilder) {
+// b.Returns(500, "Internal Server Error", restful.ServiceError{})
+// }
+func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
+ for _, each := range oneArgBlocks {
+ each(b)
+ }
+ return b
+}
+
+// To bind the route to a function.
+// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
+func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
+ b.function = function
+ return b
+}
+
+// Method specifies what HTTP method to match. Required.
+func (b *RouteBuilder) Method(method string) *RouteBuilder {
+ b.httpMethod = method
+ return b
+}
+
+// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
+func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
+ b.produces = mimeTypes
+ return b
+}
+
+// Consumes specifies what MIME types can be consumed ; the Content-Type Http header must match one of these
+func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
+ b.consumes = mimeTypes
+ return b
+}
+
+// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
+func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
+ b.currentPath = subPath
+ return b
+}
+
+// Doc tells what this route is all about. Optional.
+func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
+ b.doc = documentation
+ return b
+}
+
+// Notes is a verbose explanation of the operation behavior. Optional.
+func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
+ b.notes = notes
+ return b
+}
+
+// Reads tells what resource type will be read from the request payload. Optional.
+// A parameter of type "body" is added, required is set to true and the dataType is set to the qualified name of the sample's type.
+func (b *RouteBuilder) Reads(sample interface{}, optionalDescription ...string) *RouteBuilder {
+ fn := b.typeNameHandleFunc
+ if fn == nil {
+ fn = reflectTypeName
+ }
+ typeAsName := fn(sample)
+ description := ""
+ if len(optionalDescription) > 0 {
+ description = optionalDescription[0]
+ }
+ b.readSample = sample
+ bodyParameter := &Parameter{&ParameterData{Name: "body", Description: description}}
+ bodyParameter.beBody()
+ bodyParameter.Required(true)
+ bodyParameter.DataType(typeAsName)
+ b.Param(bodyParameter)
+ return b
+}
+
+// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not.
+// Use this to modify or extend information for the Parameter (through its Data()).
+func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
+ for _, each := range b.parameters {
+ if each.Data().Name == name {
+ return each
+ }
+ }
+ return p
+}
+
+// Writes tells what resource type will be written as the response payload. Optional.
+func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
+ b.writeSample = sample
+ return b
+}
+
+// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
+func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
+ if b.parameters == nil {
+ b.parameters = []*Parameter{}
+ }
+ b.parameters = append(b.parameters, parameter)
+ return b
+}
+
+// Operation allows you to document what the actual method/function call is of the Route.
+// Unless called, the operation name is derived from the RouteFunction set using To(..).
+func (b *RouteBuilder) Operation(name string) *RouteBuilder {
+ b.operation = name
+ return b
+}
+
+// ReturnsError is deprecated, use Returns instead.
+func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
+ log.Print("ReturnsError is deprecated, use Returns instead.")
+ return b.Returns(code, message, model)
+}
+
+// Returns allows you to document what responses (errors or regular) can be expected.
+// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
+func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
+ err := ResponseError{
+ Code: code,
+ Message: message,
+ Model: model,
+ IsDefault: false, // this field is deprecated, use default response instead.
+ }
+ // lazy init because there is no NewRouteBuilder (yet)
+ if b.errorMap == nil {
+ b.errorMap = map[int]ResponseError{}
+ }
+ b.errorMap[code] = err
+ return b
+}
+
+// DefaultReturns is a special Returns call that sets the default of the response.
+func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
+ b.defaultResponse = &ResponseError{
+ Message: message,
+ Model: model,
+ }
+ return b
+}
+
+// Metadata adds or updates a key=value pair to the metadata map.
+func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
+ if b.metadata == nil {
+ b.metadata = map[string]interface{}{}
+ }
+ b.metadata[key] = value
+ return b
+}
+
+// Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use
+func (b *RouteBuilder) Deprecate() *RouteBuilder {
+ b.deprecated = true
+ return b
+}
+
+// ResponseError represents a response; not necessarily an error.
+type ResponseError struct {
+ Code int
+ Message string
+ Model interface{}
+ IsDefault bool
+}
+
+func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
+ b.rootPath = path
+ return b
+}
+
+// Filter appends a FilterFunction to the end of filters for this Route to build.
+func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
+ b.filters = append(b.filters, filter)
+ return b
+}
+
+// If sets a condition function that controls matching the Route based on custom logic.
+// The condition function is provided the HTTP request and should return true if the route
+// should be considered.
+//
+// Efficiency note: the condition function is called before checking the method, produces, and
+// consumes criteria, so that the correct HTTP status code can be returned.
+//
+// Lifecycle note: no filter functions have been called prior to calling the condition function,
+// so the condition function should not depend on any context that might be set up by container
+// or route filters.
+func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder {
+ b.conditions = append(b.conditions, condition)
+ return b
+}
+
+// ContentEncodingEnabled allows you to override the Container's value for auto-compressing this route response.
+func (b *RouteBuilder) ContentEncodingEnabled(enabled bool) *RouteBuilder {
+ b.contentEncodingEnabled = &enabled
+ return b
+}
+
+// copyDefaults copies the WebService defaults:
+// if no specific Produces then set to rootProduces;
+// if no specific Consumes then set to rootConsumes.
+func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
+ if len(b.produces) == 0 {
+ b.produces = rootProduces
+ }
+ if len(b.consumes) == 0 {
+ b.consumes = rootConsumes
+ }
+}
+
+// typeNameHandler sets the function that will convert types to strings in the parameter
+// and model definitions.
+func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
+ b.typeNameHandleFunc = handler
+ return b
+}
+
+// Build creates a new Route using the specification details collected by the RouteBuilder
+func (b *RouteBuilder) Build() Route {
+ pathExpr, err := newPathExpression(b.currentPath)
+ if err != nil {
+ log.Printf("Invalid path:%s because:%v", b.currentPath, err)
+ os.Exit(1)
+ }
+ if b.function == nil {
+ log.Printf("No function specified for route:" + b.currentPath)
+ os.Exit(1)
+ }
+ operationName := b.operation
+ if len(operationName) == 0 && b.function != nil {
+ // extract from definition
+ operationName = nameOfFunction(b.function)
+ }
+ route := Route{
+ Method: b.httpMethod,
+ Path: concatPath(b.rootPath, b.currentPath),
+ Produces: b.produces,
+ Consumes: b.consumes,
+ Function: b.function,
+ Filters: b.filters,
+ If: b.conditions,
+ relativePath: b.currentPath,
+ pathExpr: pathExpr,
+ Doc: b.doc,
+ Notes: b.notes,
+ Operation: operationName,
+ ParameterDocs: b.parameters,
+ ResponseErrors: b.errorMap,
+ DefaultResponse: b.defaultResponse,
+ ReadSample: b.readSample,
+ WriteSample: b.writeSample,
+ Metadata: b.metadata,
+ Deprecated: b.deprecated,
+ contentEncodingEnabled: b.contentEncodingEnabled,
+ }
+ route.postBuild()
+ return route
+}
+
+func concatPath(path1, path2 string) string {
+ return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
+}
+
+var anonymousFuncCount int32
+
+// nameOfFunction returns the short name of the function f for documentation.
+// It uses a runtime feature for debugging ; its value may change for later Go versions.
+func nameOfFunction(f interface{}) string {
+ fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
+ tokenized := strings.Split(fun.Name(), ".")
+ last := tokenized[len(tokenized)-1]
+ last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
+ last = strings.TrimSuffix(last, ")-fm") // Go 1.5
+ last = strings.TrimSuffix(last, "·fm") // < Go 1.5
+ last = strings.TrimSuffix(last, "-fm") // Go 1.5
+ if last == "func1" { // this could mean conflicts in API docs
+ val := atomic.AddInt32(&anonymousFuncCount, 1)
+ last = "func" + fmt.Sprintf("%d", val)
+ atomic.StoreInt32(&anonymousFuncCount, val)
+ }
+ return last
+}
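+
+// A minimal sketch of the fluent style this builder enables, assuming a WebService
+// ws plus a hypothetical findUser RouteFunction and User type:
+//
+//	ws.Route(ws.GET("/{user-id}").To(findUser).
+//		Doc("get a user").
+//		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
+//		Writes(User{}).
+//		Returns(200, "OK", User{}).
+//		Returns(404, "Not Found", nil))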
diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/router.go
new file mode 100644
index 0000000..19078af
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/router.go
@@ -0,0 +1,20 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "net/http"
+
+// A RouteSelector finds the best matching Route given the input HTTP Request.
+// RouteSelectors can optionally also implement the PathProcessor interface to calculate the
+// path parameters after the route has been selected.
+type RouteSelector interface {
+
+ // SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
+ // It returns a selected Route and its containing WebService or an error indicating
+ // a problem.
+ SelectRoute(
+ webServices []*WebService,
+ httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
+}
diff --git a/vendor/github.com/emicklei/go-restful/service_error.go b/vendor/github.com/emicklei/go-restful/service_error.go
new file mode 100644
index 0000000..62d1108
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/service_error.go
@@ -0,0 +1,23 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "fmt"
+
+// ServiceError is a transport object to pass information about a non-Http error that occurred in a WebService while processing a request.
+type ServiceError struct {
+ Code int
+ Message string
+}
+
+// NewError returns a ServiceError using the code and reason
+func NewError(code int, message string) ServiceError {
+ return ServiceError{Code: code, Message: message}
+}
+
+// Error returns a text representation of the service error
+func (s ServiceError) Error() string {
+ return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message)
+}
diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go
new file mode 100644
index 0000000..77ba9a8
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/web_service.go
@@ -0,0 +1,290 @@
+package restful
+
+import (
+ "errors"
+ "os"
+ "reflect"
+ "sync"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
+type WebService struct {
+ rootPath string
+ pathExpr *pathExpression // cached compilation of rootPath as RegExp
+ routes []Route
+ produces []string
+ consumes []string
+ pathParameters []*Parameter
+ filters []FilterFunction
+ documentation string
+ apiVersion string
+
+ typeNameHandleFunc TypeNameHandleFunction
+
+ dynamicRoutes bool
+
+ // protects 'routes' if dynamic routes are enabled
+ routesLock sync.RWMutex
+}
+
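+// SetDynamicRoutes controls whether Routes may be removed at runtime (see RemoveRoute);
+// when enabled, reads of the route list are additionally guarded by routesLock.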
+func (w *WebService) SetDynamicRoutes(enable bool) {
+ w.dynamicRoutes = enable
+}
+
+// TypeNameHandleFunction declares functions that can handle translating the name of a sample object
+// into the restful documentation for the service.
+type TypeNameHandleFunction func(sample interface{}) string
+
+// TypeNameHandler sets the function that will convert types to strings in the parameter
+// and model definitions. If not set, the web service will invoke
+// reflect.TypeOf(object).String().
+func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService {
+ w.typeNameHandleFunc = handler
+ return w
+}
+
+// reflectTypeName is the default TypeNameHandleFunction and for a given object
+// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via
+// the reflection API.
+func reflectTypeName(sample interface{}) string {
+ return reflect.TypeOf(sample).String()
+}
+
+// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
+func (w *WebService) compilePathExpression() {
+ compiled, err := newPathExpression(w.rootPath)
+ if err != nil {
+ log.Printf("invalid path:%s because:%v", w.rootPath, err)
+ os.Exit(1)
+ }
+ w.pathExpr = compiled
+}
+
+// ApiVersion sets the API version for documentation purposes.
+func (w *WebService) ApiVersion(apiVersion string) *WebService {
+ w.apiVersion = apiVersion
+ return w
+}
+
+// Version returns the API version for documentation purposes.
+func (w *WebService) Version() string { return w.apiVersion }
+
+// Path specifies the root URL template path of the WebService.
+// All Routes will be relative to this path.
+func (w *WebService) Path(root string) *WebService {
+ w.rootPath = root
+ if len(w.rootPath) == 0 {
+ w.rootPath = "/"
+ }
+ w.compilePathExpression()
+ return w
+}
+
+// Param adds a PathParameter to document parameters used in the root path.
+func (w *WebService) Param(parameter *Parameter) *WebService {
+ if w.pathParameters == nil {
+ w.pathParameters = []*Parameter{}
+ }
+ w.pathParameters = append(w.pathParameters, parameter)
+ return w
+}
+
+// PathParameter creates a new Parameter of kind Path for documentation purposes.
+// It is initialized as required with string as its DataType.
+func (w *WebService) PathParameter(name, description string) *Parameter {
+ return PathParameter(name, description)
+}
+
+// PathParameter creates a new Parameter of kind Path for documentation purposes.
+// It is initialized as required with string as its DataType.
+func PathParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}}
+ p.bePath()
+ return p
+}
+
+// QueryParameter creates a new Parameter of kind Query for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func (w *WebService) QueryParameter(name, description string) *Parameter {
+ return QueryParameter(name, description)
+}
+
+// QueryParameter creates a new Parameter of kind Query for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func QueryParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}}
+ p.beQuery()
+ return p
+}
+
+// BodyParameter creates a new Parameter of kind Body for documentation purposes.
+// It is initialized as required without a DataType.
+func (w *WebService) BodyParameter(name, description string) *Parameter {
+ return BodyParameter(name, description)
+}
+
+// BodyParameter creates a new Parameter of kind Body for documentation purposes.
+// It is initialized as required without a DataType.
+func BodyParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
+ p.beBody()
+ return p
+}
+
+// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func (w *WebService) HeaderParameter(name, description string) *Parameter {
+ return HeaderParameter(name, description)
+}
+
+// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func HeaderParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+ p.beHeader()
+ return p
+}
+
+// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
+// It is initialized as required with string as its DataType.
+func (w *WebService) FormParameter(name, description string) *Parameter {
+ return FormParameter(name, description)
+}
+
+// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
+// It is initialized as required with string as its DataType.
+func FormParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+ p.beForm()
+ return p
+}
+
+// Route creates a new Route using the RouteBuilder and adds it to the ordered list of Routes.
+func (w *WebService) Route(builder *RouteBuilder) *WebService {
+ w.routesLock.Lock()
+ defer w.routesLock.Unlock()
+ builder.copyDefaults(w.produces, w.consumes)
+ w.routes = append(w.routes, builder.Build())
+ return w
+}
+
+// RemoveRoute removes the route that matches the given 'path' and 'method'
+func (w *WebService) RemoveRoute(path, method string) error {
+ if !w.dynamicRoutes {
+ return errors.New("dynamic routes are not enabled.")
+ }
+ w.routesLock.Lock()
+ defer w.routesLock.Unlock()
+	// collect the remaining routes; appending avoids an out-of-range write
+	// when zero or more than one route matches the given path and method
+	newRoutes := make([]Route, 0, len(w.routes))
+	for ix := range w.routes {
+		if w.routes[ix].Method == method && w.routes[ix].Path == path {
+			continue
+		}
+		newRoutes = append(newRoutes, w.routes[ix])
+	}
+	w.routes = newRoutes
+ return nil
+}
+
+// Method creates a new RouteBuilder and initialize its http method
+func (w *WebService) Method(httpMethod string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod)
+}
+
+// Produces specifies that this WebService can produce one or more MIME types.
+// Http requests must have one of these values set for the Accept header.
+func (w *WebService) Produces(contentTypes ...string) *WebService {
+ w.produces = contentTypes
+ return w
+}
+
+// Consumes specifies that this WebService can consume one or more MIME types.
+// Http requests must have one of these values set for the Content-Type header.
+func (w *WebService) Consumes(accepts ...string) *WebService {
+ w.consumes = accepts
+ return w
+}
+
+// Routes returns the Routes associated with this WebService
+func (w *WebService) Routes() []Route {
+ if !w.dynamicRoutes {
+ return w.routes
+ }
+ // Make a copy of the array to prevent concurrency problems
+ w.routesLock.RLock()
+ defer w.routesLock.RUnlock()
+ result := make([]Route, len(w.routes))
+ for ix := range w.routes {
+ result[ix] = w.routes[ix]
+ }
+ return result
+}
+
+// RootPath returns the RootPath associated with this WebService. Default "/"
+func (w *WebService) RootPath() string {
+ return w.rootPath
+}
+
+// PathParameters returns the path parameters that are shared among all Routes of this WebService
+func (w *WebService) PathParameters() []*Parameter {
+ return w.pathParameters
+}
+
+// Filter adds a filter function to the chain of filters applicable to all its Routes
+func (w *WebService) Filter(filter FilterFunction) *WebService {
+ w.filters = append(w.filters, filter)
+ return w
+}
+
+// Doc is used to set the documentation of this service.
+func (w *WebService) Doc(plainText string) *WebService {
+ w.documentation = plainText
+ return w
+}
+
+// Documentation returns the documentation of this WebService.
+func (w *WebService) Documentation() string {
+ return w.documentation
+}
+
+/*
+ Convenience methods
+*/
+
+// HEAD is a shortcut for .Method("HEAD").Path(subPath)
+func (w *WebService) HEAD(subPath string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath)
+}
+
+// GET is a shortcut for .Method("GET").Path(subPath)
+func (w *WebService) GET(subPath string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath)
+}
+
+// POST is a shortcut for .Method("POST").Path(subPath)
+func (w *WebService) POST(subPath string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath)
+}
+
+// PUT is a shortcut for .Method("PUT").Path(subPath)
+func (w *WebService) PUT(subPath string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath)
+}
+
+// PATCH is a shortcut for .Method("PATCH").Path(subPath)
+func (w *WebService) PATCH(subPath string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath)
+}
+
+// DELETE is a shortcut for .Method("DELETE").Path(subPath)
+func (w *WebService) DELETE(subPath string) *RouteBuilder {
+ return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
+}
diff --git a/vendor/github.com/emicklei/go-restful/web_service_container.go b/vendor/github.com/emicklei/go-restful/web_service_container.go
new file mode 100644
index 0000000..c9d31b0
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/web_service_container.go
@@ -0,0 +1,39 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "net/http"
+)
+
+// DefaultContainer is a restful.Container that uses http.DefaultServeMux
+var DefaultContainer *Container
+
+func init() {
+ DefaultContainer = NewContainer()
+ DefaultContainer.ServeMux = http.DefaultServeMux
+}
+
+// If set to true then panics will not be caught to return HTTP 500.
+// In that case, Route functions are responsible for handling any error situation.
+// Default value is false = recover from panics. This has performance implications.
+// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true)
+var DoNotRecover = false
+
+// Add registers a new WebService and adds it to the DefaultContainer.
+func Add(service *WebService) {
+ DefaultContainer.Add(service)
+}
+
+// Filter appends a container FilterFunction to the DefaultContainer.
+// These are called before dispatching a http.Request to a WebService.
+func Filter(filter FilterFunction) {
+ DefaultContainer.Filter(filter)
+}
+
+// RegisteredWebServices returns the collections of WebServices from the DefaultContainer
+func RegisteredWebServices() []*WebService {
+ return DefaultContainer.RegisteredWebServices()
+}
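+
+// A minimal sketch of serving the DefaultContainer, which init() above wires to
+// http.DefaultServeMux (ws is a *WebService defined elsewhere):
+//
+//	restful.Add(ws)
+//	log.Fatal(http.ListenAndServe(":8080", nil))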
diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml
new file mode 100644
index 0000000..50e4afd
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+
+go:
+ - 1.14
+ - 1.13
+
+install:
+ - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+ - go get github.com/jessevdk/go-flags
+
+script:
+ - go get
+ - go test -cover ./...
+ - cd ./v5
+ - go get
+ - go test -cover ./...
+
+notifications:
+ email: false
diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE
new file mode 100644
index 0000000..df76d7d
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2014, Evan Phoenix
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+* Neither the name of the Evan Phoenix nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md
new file mode 100644
index 0000000..121b039
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/README.md
@@ -0,0 +1,298 @@
+# JSON-Patch
+`jsonpatch` is a library which provides functionality for both applying
+[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
+well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
+
+[GoDoc](http://godoc.org/github.com/evanphx/json-patch)
+[Build Status](https://travis-ci.org/evanphx/json-patch)
+[Go Report Card](https://goreportcard.com/report/github.com/evanphx/json-patch)
+
+# Get It!
+
+**Latest and greatest**:
+```bash
+go get -u github.com/evanphx/json-patch/v5
+```
+
+**Stable Versions**:
+* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5`
+* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
+
+(previous versions below `v3` are unavailable)
+
+# Use It!
+* [Create and apply a merge patch](#create-and-apply-a-merge-patch)
+* [Create and apply a JSON Patch](#create-and-apply-a-json-patch)
+* [Comparing JSON documents](#comparing-json-documents)
+* [Combine merge patches](#combine-merge-patches)
+
+
+# Configuration
+
+* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
+ This defaults to `true` and enables the non-standard practice of allowing
+ negative indices to mean indices starting at the end of an array. This
+ functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
+ false`.
+
+* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
+ which limits the total size increase in bytes caused by "copy" operations in a
+ patch. It defaults to 0, which means there is no limit.
+
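+As a minimal sketch (the document, patch, and 1 MiB limit below are arbitrary
+illustrative values), these globals are simply set before applying a patch:
+
+```go
+package main
+
+import (
+    "fmt"
+
+    jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+    // Disable the non-standard negative-index support.
+    jsonpatch.SupportNegativeIndices = false
+
+    // Cap the total size increase caused by "copy" operations at 1 MiB
+    // (the default of 0 means no limit).
+    jsonpatch.AccumulatedCopySizeLimit = 1 << 20
+
+    original := []byte(`{"items": ["a", "b"]}`)
+    patchJSON := []byte(`[{"op": "copy", "from": "/items/0", "path": "/items/-"}]`)
+
+    patch, err := jsonpatch.DecodePatch(patchJSON)
+    if err != nil {
+        panic(err)
+    }
+
+    modified, err := patch.Apply(original)
+    if err != nil {
+        // Exceeding the copy limit is reported as an *AccumulatedCopySizeError.
+        if _, ok := err.(*jsonpatch.AccumulatedCopySizeError); ok {
+            fmt.Println("copy size limit exceeded")
+            return
+        }
+        panic(err)
+    }
+
+    fmt.Printf("modified document: %s\n", modified)
+}
+```
+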
+## Create and apply a merge patch
+Given both an original JSON document and a modified JSON document, you can create
+a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
+
+The merge patch describes the changes needed to convert the original JSON
+document into the modified one.
+
+Once you have a merge patch, you can apply it to other JSON documents using the
+`jsonpatch.MergePatch(document, patch)` function.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+ // Let's create a merge patch from these two documents...
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+ target := []byte(`{"name": "Jane", "age": 24}`)
+
+ patch, err := jsonpatch.CreateMergePatch(original, target)
+ if err != nil {
+ panic(err)
+ }
+
+ // Now let's apply the patch against a different JSON document...
+
+ alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
+ modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
+
+ fmt.Printf("patch document: %s\n", patch)
+ fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
+}
+```
+
+When run, you get the following output:
+
+```bash
+$ go run main.go
+patch document: {"height":null,"name":"Jane"}
+updated alternative doc: {"age":28,"name":"Jane"}
+```
+
+## Create and apply a JSON Patch
+You can create patch objects using `DecodePatch([]byte)`, which can then
+be applied against JSON documents.
+
+The following is an example of creating a patch from two operations, and
+applying it against a JSON document.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+ patchJSON := []byte(`[
+ {"op": "replace", "path": "/name", "value": "Jane"},
+ {"op": "remove", "path": "/height"}
+ ]`)
+
+ patch, err := jsonpatch.DecodePatch(patchJSON)
+ if err != nil {
+ panic(err)
+ }
+
+ modified, err := patch.Apply(original)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("Original document: %s\n", original)
+ fmt.Printf("Modified document: %s\n", modified)
+}
+```
+
+When run, you get the following output:
+
+```bash
+$ go run main.go
+Original document: {"name": "John", "age": 24, "height": 3.21}
+Modified document: {"age":24,"name":"Jane"}
+```
+
+## Comparing JSON documents
+Due to potential whitespace and ordering differences, one cannot simply compare
+JSON strings or byte-arrays directly.
+
+As such, you can instead use `jsonpatch.Equal(document1, document2)` to
+determine if two JSON documents are _structurally_ equal. This ignores
+whitespace differences and key-value ordering.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+ similar := []byte(`
+ {
+ "age": 24,
+ "height": 3.21,
+ "name": "John"
+ }
+ `)
+ different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
+
+ if jsonpatch.Equal(original, similar) {
+ fmt.Println(`"original" is structurally equal to "similar"`)
+ }
+
+ if !jsonpatch.Equal(original, different) {
+ fmt.Println(`"original" is _not_ structurally equal to "different"`)
+ }
+}
+```
+
+When run, you get the following output:
+```bash
+$ go run main.go
+"original" is structurally equal to "similar"
+"original" is _not_ structurally equal to "different"
+```
+
+## Combine merge patches
+Given two JSON merge patch documents, it is possible to combine them into a
+single merge patch that describes both sets of changes.
+
+Applying the resulting merge patch produces a document structurally equivalent
+to applying each of the original merge patches to the document in succession.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+
+ nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
+ ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
+
+ // Let's combine these merge patch documents...
+ combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
+ if err != nil {
+ panic(err)
+ }
+
+ // Apply each patch individually against the original document
+ withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
+ if err != nil {
+ panic(err)
+ }
+
+ withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
+ if err != nil {
+ panic(err)
+ }
+
+ // Apply the combined patch against the original document
+
+ withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
+ if err != nil {
+ panic(err)
+ }
+
+ // Do both result in the same thing? They should!
+ if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
+ fmt.Println("Both JSON documents are structurally the same!")
+ }
+
+ fmt.Printf("combined merge patch: %s", combinedPatch)
+}
+```
+
+When run, you get the following output:
+```bash
+$ go run main.go
+Both JSON documents are structurally the same!
+combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
+```
+
+# CLI for comparing JSON documents
+You can install the command-line program `json-patch`.
+
+This program takes multiple JSON patch documents as arguments and reads a JSON
+document from `stdin`. It applies the patch(es) to the document and outputs the
+modified doc.
+
+**patch.1.json**
+```json
+[
+ {"op": "replace", "path": "/name", "value": "Jane"},
+ {"op": "remove", "path": "/height"}
+]
+```
+
+**patch.2.json**
+```json
+[
+ {"op": "add", "path": "/address", "value": "123 Main St"},
+ {"op": "replace", "path": "/age", "value": "21"}
+]
+```
+
+**document.json**
+```json
+{
+ "name": "John",
+ "age": 24,
+ "height": 3.21
+}
+```
+
+You can then run:
+
+```bash
+$ go install github.com/evanphx/json-patch/cmd/json-patch
+$ cat document.json | json-patch -p patch.1.json -p patch.2.json
+{"address":"123 Main St","age":"21","name":"Jane"}
+```
+
+# Help It!
+Contributions are welcome! Leave [an issue](https://github.com/evanphx/json-patch/issues)
+or [create a PR](https://github.com/evanphx/json-patch/compare).
+
+
+Before creating a pull request, we'd ask that you make sure tests are passing
+and that you have added new tests when applicable.
+
+Contributors can run tests using:
+
+```bash
+go test -cover ./...
+```
+
+Builds for pull requests are tested automatically
+using [TravisCI](https://travis-ci.org/evanphx/json-patch).
diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go
new file mode 100644
index 0000000..75304b4
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/errors.go
@@ -0,0 +1,38 @@
+package jsonpatch
+
+import "fmt"
+
+// AccumulatedCopySizeError is an error type returned when the accumulated size
+// increase caused by copy operations in a patch operation has exceeded the
+// limit.
+type AccumulatedCopySizeError struct {
+ limit int64
+ accumulated int64
+}
+
+// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
+func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
+ return &AccumulatedCopySizeError{limit: l, accumulated: a}
+}
+
+// Error implements the error interface.
+func (a *AccumulatedCopySizeError) Error() string {
+ return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
+}
+
+// ArraySizeError is an error type returned when the array size has exceeded
+// the limit.
+type ArraySizeError struct {
+ limit int
+ size int
+}
+
+// NewArraySizeError returns an ArraySizeError.
+func NewArraySizeError(l, s int) *ArraySizeError {
+ return &ArraySizeError{limit: l, size: s}
+}
+
+// Error implements the error interface.
+func (a *ArraySizeError) Error() string {
+ return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
+}
diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go
new file mode 100644
index 0000000..14e8bb5
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/merge.go
@@ -0,0 +1,386 @@
+package jsonpatch
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
+ curDoc, err := cur.intoDoc()
+
+ if err != nil {
+ pruneNulls(patch)
+ return patch
+ }
+
+ patchDoc, err := patch.intoDoc()
+
+ if err != nil {
+ return patch
+ }
+
+ mergeDocs(curDoc, patchDoc, mergeMerge)
+
+ return cur
+}
+
+func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
+ for k, v := range *patch {
+ if v == nil {
+ if mergeMerge {
+ (*doc)[k] = nil
+ } else {
+ delete(*doc, k)
+ }
+ } else {
+ cur, ok := (*doc)[k]
+
+ if !ok || cur == nil {
+ pruneNulls(v)
+ (*doc)[k] = v
+ } else {
+ (*doc)[k] = merge(cur, v, mergeMerge)
+ }
+ }
+ }
+}
+
+func pruneNulls(n *lazyNode) {
+ sub, err := n.intoDoc()
+
+ if err == nil {
+ pruneDocNulls(sub)
+ } else {
+ ary, err := n.intoAry()
+
+ if err == nil {
+ pruneAryNulls(ary)
+ }
+ }
+}
+
+func pruneDocNulls(doc *partialDoc) *partialDoc {
+ for k, v := range *doc {
+ if v == nil {
+ delete(*doc, k)
+ } else {
+ pruneNulls(v)
+ }
+ }
+
+ return doc
+}
+
+func pruneAryNulls(ary *partialArray) *partialArray {
+ newAry := []*lazyNode{}
+
+ for _, v := range *ary {
+ if v != nil {
+ pruneNulls(v)
+ newAry = append(newAry, v)
+ }
+ }
+
+ *ary = newAry
+
+ return ary
+}
+
+var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
+var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
+var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
+
+// MergeMergePatches merges two merge patches together, such that
+// applying this resulting merged merge patch to a document yields the same
+// as merging each merge patch to the document in succession.
+func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
+ return doMergePatch(patch1Data, patch2Data, true)
+}
+
+// MergePatch merges the patchData into the docData.
+func MergePatch(docData, patchData []byte) ([]byte, error) {
+ return doMergePatch(docData, patchData, false)
+}
+
+func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
+ doc := &partialDoc{}
+
+ docErr := json.Unmarshal(docData, doc)
+
+ patch := &partialDoc{}
+
+ patchErr := json.Unmarshal(patchData, patch)
+
+ if _, ok := docErr.(*json.SyntaxError); ok {
+ return nil, errBadJSONDoc
+ }
+
+ if _, ok := patchErr.(*json.SyntaxError); ok {
+ return nil, errBadJSONPatch
+ }
+
+ if docErr == nil && *doc == nil {
+ return nil, errBadJSONDoc
+ }
+
+ if patchErr == nil && *patch == nil {
+ return nil, errBadJSONPatch
+ }
+
+ if docErr != nil || patchErr != nil {
+ // Not an error, just not a doc, so we turn straight into the patch
+ if patchErr == nil {
+ if mergeMerge {
+ doc = patch
+ } else {
+ doc = pruneDocNulls(patch)
+ }
+ } else {
+ patchAry := &partialArray{}
+ patchErr = json.Unmarshal(patchData, patchAry)
+
+ if patchErr != nil {
+ return nil, errBadJSONPatch
+ }
+
+ pruneAryNulls(patchAry)
+
+ out, patchErr := json.Marshal(patchAry)
+
+ if patchErr != nil {
+ return nil, errBadJSONPatch
+ }
+
+ return out, nil
+ }
+ } else {
+ mergeDocs(doc, patch, mergeMerge)
+ }
+
+ return json.Marshal(doc)
+}
+
+// resemblesJSONArray indicates whether the byte-slice "appears" to be
+// a JSON array or not.
+// False-positives are possible, as this function does not check the internal
+// structure of the array. It only checks that the outer syntax is present and
+// correct.
+func resemblesJSONArray(input []byte) bool {
+ input = bytes.TrimSpace(input)
+
+ hasPrefix := bytes.HasPrefix(input, []byte("["))
+ hasSuffix := bytes.HasSuffix(input, []byte("]"))
+
+ return hasPrefix && hasSuffix
+}
+
+// CreateMergePatch will return a merge patch document capable of converting
+// the original document(s) to the modified document(s).
+// The parameters can be bytes of either two JSON Documents, or two arrays of
+// JSON documents.
+// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
+func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+ originalResemblesArray := resemblesJSONArray(originalJSON)
+ modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
+
+ // Do both byte-slices seem like JSON arrays?
+ if originalResemblesArray && modifiedResemblesArray {
+ return createArrayMergePatch(originalJSON, modifiedJSON)
+ }
+
+ // Are both byte-slices not arrays? Then they are likely JSON objects...
+ if !originalResemblesArray && !modifiedResemblesArray {
+ return createObjectMergePatch(originalJSON, modifiedJSON)
+ }
+
+ // None of the above? Then return an error because of mismatched types.
+ return nil, errBadMergeTypes
+}
+
+// createObjectMergePatch will return a merge-patch document capable of
+// converting the original document to the modified document.
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+ originalDoc := map[string]interface{}{}
+ modifiedDoc := map[string]interface{}{}
+
+ err := json.Unmarshal(originalJSON, &originalDoc)
+ if err != nil {
+ return nil, errBadJSONDoc
+ }
+
+ err = json.Unmarshal(modifiedJSON, &modifiedDoc)
+ if err != nil {
+ return nil, errBadJSONDoc
+ }
+
+ dest, err := getDiff(originalDoc, modifiedDoc)
+ if err != nil {
+ return nil, err
+ }
+
+ return json.Marshal(dest)
+}
+
+// createArrayMergePatch will return an array of merge-patch documents capable
+// of converting the original document to the modified document for each
+// pair of JSON documents provided in the arrays.
+// Arrays of mismatched sizes will result in an error.
+func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+ originalDocs := []json.RawMessage{}
+ modifiedDocs := []json.RawMessage{}
+
+ err := json.Unmarshal(originalJSON, &originalDocs)
+ if err != nil {
+ return nil, errBadJSONDoc
+ }
+
+ err = json.Unmarshal(modifiedJSON, &modifiedDocs)
+ if err != nil {
+ return nil, errBadJSONDoc
+ }
+
+ total := len(originalDocs)
+ if len(modifiedDocs) != total {
+ return nil, errBadJSONDoc
+ }
+
+ result := []json.RawMessage{}
+ for i := 0; i < len(originalDocs); i++ {
+ original := originalDocs[i]
+ modified := modifiedDocs[i]
+
+ patch, err := createObjectMergePatch(original, modified)
+ if err != nil {
+ return nil, err
+ }
+
+ result = append(result, json.RawMessage(patch))
+ }
+
+ return json.Marshal(result)
+}
+
+// Returns true if the arrays match (must be JSON types).
+// As is idiomatic for Go, an empty array is not the same as a nil array.
+func matchesArray(a, b []interface{}) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ if (a == nil && b != nil) || (a != nil && b == nil) {
+ return false
+ }
+ for i := range a {
+ if !matchesValue(a[i], b[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Returns true if the values match (must be JSON types).
+// The types of the values must match; otherwise it always returns false.
+// If two map[string]interface{} values are given, all elements must match.
+func matchesValue(av, bv interface{}) bool {
+ if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+ return false
+ }
+ switch at := av.(type) {
+ case string:
+ bt := bv.(string)
+ if bt == at {
+ return true
+ }
+ case float64:
+ bt := bv.(float64)
+ if bt == at {
+ return true
+ }
+ case bool:
+ bt := bv.(bool)
+ if bt == at {
+ return true
+ }
+ case nil:
+ // Both nil, fine.
+ return true
+ case map[string]interface{}:
+ bt := bv.(map[string]interface{})
+ if len(bt) != len(at) {
+ return false
+ }
+ for key := range bt {
+ av, aOK := at[key]
+ bv, bOK := bt[key]
+ if aOK != bOK {
+ return false
+ }
+ if !matchesValue(av, bv) {
+ return false
+ }
+ }
+ return true
+ case []interface{}:
+ bt := bv.([]interface{})
+ return matchesArray(at, bt)
+ }
+ return false
+}
+
+// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
+ into := map[string]interface{}{}
+ for key, bv := range b {
+ av, ok := a[key]
+ // value was added
+ if !ok {
+ into[key] = bv
+ continue
+ }
+ // If types have changed, replace completely
+ if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+ into[key] = bv
+ continue
+ }
+ // Types are the same, compare values
+ switch at := av.(type) {
+ case map[string]interface{}:
+ bt := bv.(map[string]interface{})
+ dst := make(map[string]interface{}, len(bt))
+ dst, err := getDiff(at, bt)
+ if err != nil {
+ return nil, err
+ }
+ if len(dst) > 0 {
+ into[key] = dst
+ }
+ case string, float64, bool:
+ if !matchesValue(av, bv) {
+ into[key] = bv
+ }
+ case []interface{}:
+ bt := bv.([]interface{})
+ if !matchesArray(at, bt) {
+ into[key] = bv
+ }
+ case nil:
+ switch bv.(type) {
+ case nil:
+ // Both nil, fine.
+ default:
+ into[key] = bv
+ }
+ default:
+ panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
+ }
+ }
+ // Now add all deleted values as nil
+ for key := range a {
+ _, found := b[key]
+ if !found {
+ into[key] = nil
+ }
+ }
+ return into, nil
+}
diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go
new file mode 100644
index 0000000..f185a45
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/patch.go
@@ -0,0 +1,784 @@
+package jsonpatch
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+const (
+ eRaw = iota
+ eDoc
+ eAry
+)
+
+var (
+ // SupportNegativeIndices decides whether to support the non-standard practice
+ // of allowing negative indices to mean indices starting at the end of an array.
+ // Defaults to true.
+ SupportNegativeIndices bool = true
+ // AccumulatedCopySizeLimit limits the total size increase in bytes caused by
+ // "copy" operations in a patch.
+ AccumulatedCopySizeLimit int64 = 0
+)
+
+var (
+ ErrTestFailed = errors.New("test failed")
+ ErrMissing = errors.New("missing value")
+ ErrUnknownType = errors.New("unknown object type")
+ ErrInvalid = errors.New("invalid state detected")
+ ErrInvalidIndex = errors.New("invalid index referenced")
+)
+
+type lazyNode struct {
+ raw *json.RawMessage
+ doc partialDoc
+ ary partialArray
+ which int
+}
+
+// Operation is a single JSON-Patch step, such as a single 'add' operation.
+type Operation map[string]*json.RawMessage
+
+// Patch is an ordered collection of Operations.
+type Patch []Operation
+
+type partialDoc map[string]*lazyNode
+type partialArray []*lazyNode
+
+type container interface {
+ get(key string) (*lazyNode, error)
+ set(key string, val *lazyNode) error
+ add(key string, val *lazyNode) error
+ remove(key string) error
+}
+
+func newLazyNode(raw *json.RawMessage) *lazyNode {
+ return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
+}
+
+func (n *lazyNode) MarshalJSON() ([]byte, error) {
+ switch n.which {
+ case eRaw:
+ return json.Marshal(n.raw)
+ case eDoc:
+ return json.Marshal(n.doc)
+ case eAry:
+ return json.Marshal(n.ary)
+ default:
+ return nil, ErrUnknownType
+ }
+}
+
+func (n *lazyNode) UnmarshalJSON(data []byte) error {
+ dest := make(json.RawMessage, len(data))
+ copy(dest, data)
+ n.raw = &dest
+ n.which = eRaw
+ return nil
+}
+
+func deepCopy(src *lazyNode) (*lazyNode, int, error) {
+ if src == nil {
+ return nil, 0, nil
+ }
+ a, err := src.MarshalJSON()
+ if err != nil {
+ return nil, 0, err
+ }
+ sz := len(a)
+ ra := make(json.RawMessage, sz)
+ copy(ra, a)
+ return newLazyNode(&ra), sz, nil
+}
+
+func (n *lazyNode) intoDoc() (*partialDoc, error) {
+ if n.which == eDoc {
+ return &n.doc, nil
+ }
+
+ if n.raw == nil {
+ return nil, ErrInvalid
+ }
+
+ err := json.Unmarshal(*n.raw, &n.doc)
+
+ if err != nil {
+ return nil, err
+ }
+
+ n.which = eDoc
+ return &n.doc, nil
+}
+
+func (n *lazyNode) intoAry() (*partialArray, error) {
+ if n.which == eAry {
+ return &n.ary, nil
+ }
+
+ if n.raw == nil {
+ return nil, ErrInvalid
+ }
+
+ err := json.Unmarshal(*n.raw, &n.ary)
+
+ if err != nil {
+ return nil, err
+ }
+
+ n.which = eAry
+ return &n.ary, nil
+}
+
+func (n *lazyNode) compact() []byte {
+ buf := &bytes.Buffer{}
+
+ if n.raw == nil {
+ return nil
+ }
+
+ err := json.Compact(buf, *n.raw)
+
+ if err != nil {
+ return *n.raw
+ }
+
+ return buf.Bytes()
+}
+
+func (n *lazyNode) tryDoc() bool {
+ if n.raw == nil {
+ return false
+ }
+
+ err := json.Unmarshal(*n.raw, &n.doc)
+
+ if err != nil {
+ return false
+ }
+
+ n.which = eDoc
+ return true
+}
+
+func (n *lazyNode) tryAry() bool {
+ if n.raw == nil {
+ return false
+ }
+
+ err := json.Unmarshal(*n.raw, &n.ary)
+
+ if err != nil {
+ return false
+ }
+
+ n.which = eAry
+ return true
+}
+
+func (n *lazyNode) equal(o *lazyNode) bool {
+ if n.which == eRaw {
+ if !n.tryDoc() && !n.tryAry() {
+ if o.which != eRaw {
+ return false
+ }
+
+ return bytes.Equal(n.compact(), o.compact())
+ }
+ }
+
+ if n.which == eDoc {
+ if o.which == eRaw {
+ if !o.tryDoc() {
+ return false
+ }
+ }
+
+ if o.which != eDoc {
+ return false
+ }
+
+ if len(n.doc) != len(o.doc) {
+ return false
+ }
+
+ for k, v := range n.doc {
+ ov, ok := o.doc[k]
+
+ if !ok {
+ return false
+ }
+
+ if (v == nil) != (ov == nil) {
+ return false
+ }
+
+ if v == nil && ov == nil {
+ continue
+ }
+
+ if !v.equal(ov) {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ if o.which != eAry && !o.tryAry() {
+ return false
+ }
+
+ if len(n.ary) != len(o.ary) {
+ return false
+ }
+
+ for idx, val := range n.ary {
+ if !val.equal(o.ary[idx]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Kind reads the "op" field of the Operation.
+func (o Operation) Kind() string {
+ if obj, ok := o["op"]; ok && obj != nil {
+ var op string
+
+ err := json.Unmarshal(*obj, &op)
+
+ if err != nil {
+ return "unknown"
+ }
+
+ return op
+ }
+
+ return "unknown"
+}
+
+// Path reads the "path" field of the Operation.
+func (o Operation) Path() (string, error) {
+ if obj, ok := o["path"]; ok && obj != nil {
+ var op string
+
+ err := json.Unmarshal(*obj, &op)
+
+ if err != nil {
+ return "unknown", err
+ }
+
+ return op, nil
+ }
+
+ return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
+}
+
+// From reads the "from" field of the Operation.
+func (o Operation) From() (string, error) {
+ if obj, ok := o["from"]; ok && obj != nil {
+ var op string
+
+ err := json.Unmarshal(*obj, &op)
+
+ if err != nil {
+ return "unknown", err
+ }
+
+ return op, nil
+ }
+
+ return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
+}
+
+func (o Operation) value() *lazyNode {
+ if obj, ok := o["value"]; ok {
+ return newLazyNode(obj)
+ }
+
+ return nil
+}
+
+// ValueInterface decodes the operation value into an interface.
+func (o Operation) ValueInterface() (interface{}, error) {
+ if obj, ok := o["value"]; ok && obj != nil {
+ var v interface{}
+
+ err := json.Unmarshal(*obj, &v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ }
+
+ return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
+}
+
+func isArray(buf []byte) bool {
+Loop:
+ for _, c := range buf {
+ switch c {
+ case ' ':
+ case '\n':
+ case '\t':
+ continue
+ case '[':
+ return true
+ default:
+ break Loop
+ }
+ }
+
+ return false
+}
+
+func findObject(pd *container, path string) (container, string) {
+ doc := *pd
+
+ split := strings.Split(path, "/")
+
+ if len(split) < 2 {
+ return nil, ""
+ }
+
+ parts := split[1 : len(split)-1]
+
+ key := split[len(split)-1]
+
+ var err error
+
+ for _, part := range parts {
+
+ next, ok := doc.get(decodePatchKey(part))
+
+ if next == nil || ok != nil {
+ return nil, ""
+ }
+
+ if isArray(*next.raw) {
+ doc, err = next.intoAry()
+
+ if err != nil {
+ return nil, ""
+ }
+ } else {
+ doc, err = next.intoDoc()
+
+ if err != nil {
+ return nil, ""
+ }
+ }
+ }
+
+ return doc, decodePatchKey(key)
+}
+
+func (d *partialDoc) set(key string, val *lazyNode) error {
+ (*d)[key] = val
+ return nil
+}
+
+func (d *partialDoc) add(key string, val *lazyNode) error {
+ (*d)[key] = val
+ return nil
+}
+
+func (d *partialDoc) get(key string) (*lazyNode, error) {
+ return (*d)[key], nil
+}
+
+func (d *partialDoc) remove(key string) error {
+ _, ok := (*d)[key]
+ if !ok {
+ return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key)
+ }
+
+ delete(*d, key)
+ return nil
+}
+
+// set should only be used to implement the "replace" operation, so "key" must
+// be an already existing index in "d".
+func (d *partialArray) set(key string, val *lazyNode) error {
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ return err
+ }
+ (*d)[idx] = val
+ return nil
+}
+
+func (d *partialArray) add(key string, val *lazyNode) error {
+ if key == "-" {
+ *d = append(*d, val)
+ return nil
+ }
+
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
+ }
+
+ sz := len(*d) + 1
+
+ ary := make([]*lazyNode, sz)
+
+ cur := *d
+
+ if idx >= len(ary) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+
+ if idx < 0 {
+ if !SupportNegativeIndices {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(ary) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(ary)
+ }
+
+ copy(ary[0:idx], cur[0:idx])
+ ary[idx] = val
+ copy(ary[idx+1:], cur[idx:])
+
+ *d = ary
+ return nil
+}
+
+func (d *partialArray) get(key string) (*lazyNode, error) {
+ idx, err := strconv.Atoi(key)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if idx >= len(*d) {
+ return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+
+ return (*d)[idx], nil
+}
+
+func (d *partialArray) remove(key string) error {
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ return err
+ }
+
+ cur := *d
+
+ if idx >= len(cur) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+
+ if idx < 0 {
+ if !SupportNegativeIndices {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(cur) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(cur)
+ }
+
+ ary := make([]*lazyNode, len(cur)-1)
+
+ copy(ary[0:idx], cur[0:idx])
+ copy(ary[idx:], cur[idx+1:])
+
+ *d = ary
+ return nil
+
+}
+
+func (p Patch) add(doc *container, op Operation) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(ErrMissing, "add operation failed to decode path")
+ }
+
+ con, key := findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
+ }
+
+ err = con.add(key, op.value())
+ if err != nil {
+ return errors.Wrapf(err, "error in add for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) remove(doc *container, op Operation) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
+ }
+
+ con, key := findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
+ }
+
+ err = con.remove(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in remove for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) replace(doc *container, op Operation) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(err, "replace operation failed to decode path")
+ }
+
+ con, key := findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
+ }
+
+ _, ok := con.get(key)
+ if ok != nil {
+ return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
+ }
+
+ err = con.set(key, op.value())
+ if err != nil {
+ return errors.Wrapf(err, "error in replace for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) move(doc *container, op Operation) error {
+ from, err := op.From()
+ if err != nil {
+ return errors.Wrapf(err, "move operation failed to decode from")
+ }
+
+ con, key := findObject(doc, from)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
+ }
+
+ val, err := con.get(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in move for path: '%s'", key)
+ }
+
+ err = con.remove(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in move for path: '%s'", key)
+ }
+
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(err, "move operation failed to decode path")
+ }
+
+ con, key = findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
+ }
+
+ err = con.add(key, val)
+ if err != nil {
+ return errors.Wrapf(err, "error in move for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) test(doc *container, op Operation) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(err, "test operation failed to decode path")
+ }
+
+ con, key := findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
+ }
+
+ val, err := con.get(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in test for path: '%s'", path)
+ }
+
+ if val == nil {
+ if op.value().raw == nil {
+ return nil
+ }
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ } else if op.value() == nil {
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ }
+
+ if val.equal(op.value()) {
+ return nil
+ }
+
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+}
+
+func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error {
+ from, err := op.From()
+ if err != nil {
+ return errors.Wrapf(err, "copy operation failed to decode from")
+ }
+
+ con, key := findObject(doc, from)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
+ }
+
+ val, err := con.get(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in copy for from: '%s'", from)
+ }
+
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
+ }
+
+ con, key = findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
+ }
+
+ valCopy, sz, err := deepCopy(val)
+ if err != nil {
+ return errors.Wrapf(err, "error while performing deep copy")
+ }
+
+ (*accumulatedCopySize) += int64(sz)
+ if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
+ return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
+ }
+
+ err = con.add(key, valCopy)
+ if err != nil {
+ return errors.Wrapf(err, "error while adding value during copy")
+ }
+
+ return nil
+}
+
+// Equal indicates if 2 JSON documents have the same structural equality.
+func Equal(a, b []byte) bool {
+ ra := make(json.RawMessage, len(a))
+ copy(ra, a)
+ la := newLazyNode(&ra)
+
+ rb := make(json.RawMessage, len(b))
+ copy(rb, b)
+ lb := newLazyNode(&rb)
+
+ return la.equal(lb)
+}
+
+// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
+func DecodePatch(buf []byte) (Patch, error) {
+ var p Patch
+
+ err := json.Unmarshal(buf, &p)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// Apply mutates a JSON document according to the patch, and returns the new
+// document.
+func (p Patch) Apply(doc []byte) ([]byte, error) {
+ return p.ApplyIndent(doc, "")
+}
+
+// ApplyIndent mutates a JSON document according to the patch, and returns the new
+// document indented.
+func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
+ var pd container
+ if doc[0] == '[' {
+ pd = &partialArray{}
+ } else {
+ pd = &partialDoc{}
+ }
+
+ err := json.Unmarshal(doc, pd)
+
+ if err != nil {
+ return nil, err
+ }
+
+ err = nil
+
+ var accumulatedCopySize int64
+
+ for _, op := range p {
+ switch op.Kind() {
+ case "add":
+ err = p.add(&pd, op)
+ case "remove":
+ err = p.remove(&pd, op)
+ case "replace":
+ err = p.replace(&pd, op)
+ case "move":
+ err = p.move(&pd, op)
+ case "test":
+ err = p.test(&pd, op)
+ case "copy":
+ err = p.copy(&pd, op, &accumulatedCopySize)
+ default:
+ err = fmt.Errorf("Unexpected kind: %s", op.Kind())
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if indent != "" {
+ return json.MarshalIndent(pd, "", indent)
+ }
+
+ return json.Marshal(pd)
+}
+
+// From http://tools.ietf.org/html/rfc6901#section-4 :
+//
+// Evaluation of each reference token begins by decoding any escaped
+// character sequence. This is performed by first transforming any
+// occurrence of the sequence '~1' to '/', and then transforming any
+// occurrence of the sequence '~0' to '~'.
+
+var (
+ rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
+)
+
+func decodePatchKey(k string) string {
+ return rfc6901Decoder.Replace(k)
+}
diff --git a/vendor/github.com/fatih/color/.travis.yml b/vendor/github.com/fatih/color/.travis.yml
new file mode 100644
index 0000000..95f8a1f
--- /dev/null
+++ b/vendor/github.com/fatih/color/.travis.yml
@@ -0,0 +1,5 @@
+language: go
+go:
+ - 1.8.x
+ - tip
+
diff --git a/vendor/github.com/fatih/color/Gopkg.lock b/vendor/github.com/fatih/color/Gopkg.lock
new file mode 100644
index 0000000..7d879e9
--- /dev/null
+++ b/vendor/github.com/fatih/color/Gopkg.lock
@@ -0,0 +1,27 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "github.com/mattn/go-colorable"
+ packages = ["."]
+ revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072"
+ version = "v0.0.9"
+
+[[projects]]
+ name = "github.com/mattn/go-isatty"
+ packages = ["."]
+ revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
+ version = "v0.0.3"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/sys"
+ packages = ["unix"]
+ revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "e8a50671c3cb93ea935bf210b1cd20702876b9d9226129be581ef646d1565cdc"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/fatih/color/Gopkg.toml b/vendor/github.com/fatih/color/Gopkg.toml
new file mode 100644
index 0000000..ff1617f
--- /dev/null
+++ b/vendor/github.com/fatih/color/Gopkg.toml
@@ -0,0 +1,30 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+
+[[constraint]]
+ name = "github.com/mattn/go-colorable"
+ version = "0.0.9"
+
+[[constraint]]
+ name = "github.com/mattn/go-isatty"
+ version = "0.0.3"
diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md
new file mode 100644
index 0000000..25fdaf6
--- /dev/null
+++ b/vendor/github.com/fatih/color/LICENSE.md
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Fatih Arslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md
new file mode 100644
index 0000000..3fc9544
--- /dev/null
+++ b/vendor/github.com/fatih/color/README.md
@@ -0,0 +1,179 @@
+# Color [GoDoc](https://godoc.org/github.com/fatih/color) [Build Status](https://travis-ci.org/fatih/color)
+
+
+
+Color lets you use colorized outputs in terms of [ANSI Escape
+Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
+has support for Windows too! The API can be used in several ways; pick one that
+suits you.
+
+
+
+
+
+## Install
+
+```bash
+go get github.com/fatih/color
+```
+
+Note that the `vendor` folder is here for stability. Remove the folder if you
+already have the dependencies in your GOPATH.
+
+## Examples
+
+### Standard colors
+
+```go
+// Print with default helper functions
+color.Cyan("Prints text in cyan.")
+
+// A newline will be appended automatically
+color.Blue("Prints %s in blue.", "text")
+
+// These are using the default foreground colors
+color.Red("We have red")
+color.Magenta("And many others ..")
+
+```
+
+### Mix and reuse colors
+
+```go
+// Create a new color object
+c := color.New(color.FgCyan).Add(color.Underline)
+c.Println("Prints cyan text with an underline.")
+
+// Or just add them to New()
+d := color.New(color.FgCyan, color.Bold)
+d.Printf("This prints bold cyan %s\n", "too!.")
+
+// Mix up foreground and background colors, create new mixes!
+red := color.New(color.FgRed)
+
+boldRed := red.Add(color.Bold)
+boldRed.Println("This will print text in bold red.")
+
+whiteBackground := red.Add(color.BgWhite)
+whiteBackground.Println("Red text with white background.")
+```
+
+### Use your own output (io.Writer)
+
+```go
+// Use your own io.Writer output
+color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+blue := color.New(color.FgBlue)
+blue.Fprint(myWriter, "This will print text in blue.")
+```
+
+### Custom print functions (PrintFunc)
+
+```go
+// Create a custom print function for convenience
+red := color.New(color.FgRed).PrintfFunc()
+red("Warning")
+red("Error: %s", err)
+
+// Mix up multiple attributes
+notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+notice("Don't forget this...")
+```
+
+### Custom fprint functions (FprintFunc)
+
+```go
+blue := color.New(color.FgBlue).FprintfFunc()
+blue(myWriter, "important notice: %s", stars)
+
+// Mix up with multiple attributes
+success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+success(myWriter, "Don't forget this...")
+```
+
+### Insert into noncolor strings (SprintFunc)
+
+```go
+// Create SprintXxx functions to mix strings with other non-colorized strings:
+yellow := color.New(color.FgYellow).SprintFunc()
+red := color.New(color.FgRed).SprintFunc()
+fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
+fmt.Printf("This %s rocks!\n", info("package"))
+
+// Use helper functions
+fmt.Println("This", color.RedString("warning"), "should be not neglected.")
+fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.")
+
+// Windows supported too! Just don't forget to change the output to color.Output
+fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+```
+
+### Plug into existing code
+
+```go
+// Use handy standard colors
+color.Set(color.FgYellow)
+
+fmt.Println("Existing text will now be in yellow")
+fmt.Printf("This one %s\n", "too")
+
+color.Unset() // Don't forget to unset
+
+// You can mix up parameters
+color.Set(color.FgMagenta, color.Bold)
+defer color.Unset() // Use it in your function
+
+fmt.Println("All text will now be bold magenta.")
+```
+
+### Disable/Enable color
+
+There might be a case where you want to explicitly disable/enable color output. The
+`go-isatty` package will automatically disable color output for non-tty output streams
+(for example, if the output is piped directly to `less`).
+
+`Color` has support to disable/enable colors both globally and for single color
+definitions. For example, suppose you have a CLI app and a `--no-color` bool flag. You
+can easily disable the color output with:
+
+```go
+
+var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+if *flagNoColor {
+ color.NoColor = true // disables colorized output
+}
+```
+
+It also has support for single color definitions (local). You can
+disable/enable color output on the fly:
+
+```go
+c := color.New(color.FgCyan)
+c.Println("Prints cyan text")
+
+c.DisableColor()
+c.Println("This is printed without any color")
+
+c.EnableColor()
+c.Println("This prints again cyan...")
+```
+
+## Todo
+
+* Save/Return previous values
+* Evaluate fmt.Formatter interface
+
+
+## Credits
+
+ * [Fatih Arslan](https://github.com/fatih)
+ * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
+
+## License
+
+The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
+
diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go
new file mode 100644
index 0000000..91c8e9f
--- /dev/null
+++ b/vendor/github.com/fatih/color/color.go
@@ -0,0 +1,603 @@
+package color
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/mattn/go-colorable"
+ "github.com/mattn/go-isatty"
+)
+
+var (
+ // NoColor defines if the output is colorized or not. It's dynamically set to
+ // false or true based on the stdout's file descriptor referring to a terminal
+ // or not. This is a global option and affects all colors. For more control
+ // over each color, use its DisableColor() method individually.
+ NoColor = os.Getenv("TERM") == "dumb" ||
+ (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
+
+ // Output defines the standard output of the print functions. By default
+ // os.Stdout is used.
+ Output = colorable.NewColorableStdout()
+
+ // Error defines a color supporting writer for os.Stderr.
+ Error = colorable.NewColorableStderr()
+
+ // colorsCache is used to reduce the count of created Color objects and
+ // allows to reuse already created objects with required Attribute.
+ colorsCache = make(map[Attribute]*Color)
+ colorsCacheMu sync.Mutex // protects colorsCache
+)
+
+// Color defines a custom color object which is defined by SGR parameters.
+type Color struct {
+ params []Attribute
+ noColor *bool
+}
+
+// Attribute defines a single SGR Code
+type Attribute int
+
+const escape = "\x1b"
+
+// Base attributes
+const (
+ Reset Attribute = iota
+ Bold
+ Faint
+ Italic
+ Underline
+ BlinkSlow
+ BlinkRapid
+ ReverseVideo
+ Concealed
+ CrossedOut
+)
+
+// Foreground text colors
+const (
+ FgBlack Attribute = iota + 30
+ FgRed
+ FgGreen
+ FgYellow
+ FgBlue
+ FgMagenta
+ FgCyan
+ FgWhite
+)
+
+// Foreground Hi-Intensity text colors
+const (
+ FgHiBlack Attribute = iota + 90
+ FgHiRed
+ FgHiGreen
+ FgHiYellow
+ FgHiBlue
+ FgHiMagenta
+ FgHiCyan
+ FgHiWhite
+)
+
+// Background text colors
+const (
+ BgBlack Attribute = iota + 40
+ BgRed
+ BgGreen
+ BgYellow
+ BgBlue
+ BgMagenta
+ BgCyan
+ BgWhite
+)
+
+// Background Hi-Intensity text colors
+const (
+ BgHiBlack Attribute = iota + 100
+ BgHiRed
+ BgHiGreen
+ BgHiYellow
+ BgHiBlue
+ BgHiMagenta
+ BgHiCyan
+ BgHiWhite
+)
+
+// New returns a newly created color object.
+func New(value ...Attribute) *Color {
+ c := &Color{params: make([]Attribute, 0)}
+ c.Add(value...)
+ return c
+}
+
+// Set sets the given parameters immediately. It will change the color of
+// output with the given SGR parameters until color.Unset() is called.
+func Set(p ...Attribute) *Color {
+ c := New(p...)
+ c.Set()
+ return c
+}
+
+// Unset resets all escape attributes and clears the output. It should usually
+// be called after Set().
+func Unset() {
+ if NoColor {
+ return
+ }
+
+ fmt.Fprintf(Output, "%s[%dm", escape, Reset)
+}
+
+// Set sets the SGR sequence.
+func (c *Color) Set() *Color {
+ if c.isNoColorSet() {
+ return c
+ }
+
+ fmt.Fprintf(Output, c.format())
+ return c
+}
+
+func (c *Color) unset() {
+ if c.isNoColorSet() {
+ return
+ }
+
+ Unset()
+}
+
+func (c *Color) setWriter(w io.Writer) *Color {
+ if c.isNoColorSet() {
+ return c
+ }
+
+ fmt.Fprintf(w, c.format())
+ return c
+}
+
+func (c *Color) unsetWriter(w io.Writer) {
+ if c.isNoColorSet() {
+ return
+ }
+
+ if NoColor {
+ return
+ }
+
+ fmt.Fprintf(w, "%s[%dm", escape, Reset)
+}
+
+// Add is used to chain SGR parameters. Use as many parameters as you want to combine
+// and create custom color objects. Example: Add(color.FgRed, color.Underline).
+func (c *Color) Add(value ...Attribute) *Color {
+ c.params = append(c.params, value...)
+ return c
+}
+
+func (c *Color) prepend(value Attribute) {
+ c.params = append(c.params, 0)
+ copy(c.params[1:], c.params[0:])
+ c.params[0] = value
+}
+
+// Fprint formats using the default formats for its operands and writes to w.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+// On Windows, users should wrap w with colorable.NewColorable() if w is of
+// type *os.File.
+func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ c.setWriter(w)
+ defer c.unsetWriter(w)
+
+ return fmt.Fprint(w, a...)
+}
+
+// Print formats using the default formats for its operands and writes to
+// standard output. Spaces are added between operands when neither is a
+// string. It returns the number of bytes written and any write error
+// encountered. This is the standard fmt.Print() method wrapped with the given
+// color.
+func (c *Color) Print(a ...interface{}) (n int, err error) {
+ c.Set()
+ defer c.unset()
+
+ return fmt.Fprint(Output, a...)
+}
+
+// Fprintf formats according to a format specifier and writes to w.
+// It returns the number of bytes written and any write error encountered.
+// On Windows, users should wrap w with colorable.NewColorable() if w is of
+// type *os.File.
+func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ c.setWriter(w)
+ defer c.unsetWriter(w)
+
+ return fmt.Fprintf(w, format, a...)
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+// This is the standard fmt.Printf() method wrapped with the given color.
+func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
+ c.Set()
+ defer c.unset()
+
+ return fmt.Fprintf(Output, format, a...)
+}
+
+// Fprintln formats using the default formats for its operands and writes to w.
+// Spaces are always added between operands and a newline is appended.
+// On Windows, users should wrap w with colorable.NewColorable() if w is of
+// type *os.File.
+func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ c.setWriter(w)
+ defer c.unsetWriter(w)
+
+ return fmt.Fprintln(w, a...)
+}
+
+// Println formats using the default formats for its operands and writes to
+// standard output. Spaces are always added between operands and a newline is
+// appended. It returns the number of bytes written and any write error
+// encountered. This is the standard fmt.Print() method wrapped with the given
+// color.
+func (c *Color) Println(a ...interface{}) (n int, err error) {
+ c.Set()
+ defer c.unset()
+
+ return fmt.Fprintln(Output, a...)
+}
+
+// Sprint is just like Print, but returns a string instead of printing it.
+func (c *Color) Sprint(a ...interface{}) string {
+ return c.wrap(fmt.Sprint(a...))
+}
+
+// Sprintln is just like Println, but returns a string instead of printing it.
+func (c *Color) Sprintln(a ...interface{}) string {
+ return c.wrap(fmt.Sprintln(a...))
+}
+
+// Sprintf is just like Printf, but returns a string instead of printing it.
+func (c *Color) Sprintf(format string, a ...interface{}) string {
+ return c.wrap(fmt.Sprintf(format, a...))
+}
+
+// FprintFunc returns a new function that prints the passed arguments as
+// colorized with color.Fprint().
+func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) {
+ return func(w io.Writer, a ...interface{}) {
+ c.Fprint(w, a...)
+ }
+}
+
+// PrintFunc returns a new function that prints the passed arguments as
+// colorized with color.Print().
+func (c *Color) PrintFunc() func(a ...interface{}) {
+ return func(a ...interface{}) {
+ c.Print(a...)
+ }
+}
+
+// FprintfFunc returns a new function that prints the passed arguments as
+// colorized with color.Fprintf().
+func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) {
+ return func(w io.Writer, format string, a ...interface{}) {
+ c.Fprintf(w, format, a...)
+ }
+}
+
+// PrintfFunc returns a new function that prints the passed arguments as
+// colorized with color.Printf().
+func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
+ return func(format string, a ...interface{}) {
+ c.Printf(format, a...)
+ }
+}
+
+// FprintlnFunc returns a new function that prints the passed arguments as
+// colorized with color.Fprintln().
+func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) {
+ return func(w io.Writer, a ...interface{}) {
+ c.Fprintln(w, a...)
+ }
+}
+
+// PrintlnFunc returns a new function that prints the passed arguments as
+// colorized with color.Println().
+func (c *Color) PrintlnFunc() func(a ...interface{}) {
+ return func(a ...interface{}) {
+ c.Println(a...)
+ }
+}
+
+// SprintFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprint(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output, example:
+//
+// put := New(FgYellow).SprintFunc()
+// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
+func (c *Color) SprintFunc() func(a ...interface{}) string {
+ return func(a ...interface{}) string {
+ return c.wrap(fmt.Sprint(a...))
+ }
+}
+
+// SprintfFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintf(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
+ return func(format string, a ...interface{}) string {
+ return c.wrap(fmt.Sprintf(format, a...))
+ }
+}
+
+// SprintlnFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintln(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintlnFunc() func(a ...interface{}) string {
+ return func(a ...interface{}) string {
+ return c.wrap(fmt.Sprintln(a...))
+ }
+}
+
+// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
+// an example output might be: "1;36" -> bold cyan
+func (c *Color) sequence() string {
+ format := make([]string, len(c.params))
+ for i, v := range c.params {
+ format[i] = strconv.Itoa(int(v))
+ }
+
+ return strings.Join(format, ";")
+}
+
+// wrap wraps the string s with the color's attributes. The string is ready to
+// be printed.
+func (c *Color) wrap(s string) string {
+ if c.isNoColorSet() {
+ return s
+ }
+
+ return c.format() + s + c.unformat()
+}
+
+func (c *Color) format() string {
+ return fmt.Sprintf("%s[%sm", escape, c.sequence())
+}
+
+func (c *Color) unformat() string {
+ return fmt.Sprintf("%s[%dm", escape, Reset)
+}
+
+// DisableColor disables the color output. Useful when you don't want to change
+// any existing code but still want to control whether output is colorized. Can
+// be used for flags like "--no-color". To enable color again, use the
+// EnableColor() method.
+func (c *Color) DisableColor() {
+ c.noColor = boolPtr(true)
+}
+
+// EnableColor enables the color output. Use it in conjunction with
+// DisableColor(). Otherwise this method has no side effects.
+func (c *Color) EnableColor() {
+ c.noColor = boolPtr(false)
+}
+
+func (c *Color) isNoColorSet() bool {
+ // check first whether the user has explicitly set a value
+ if c.noColor != nil {
+ return *c.noColor
+ }
+
+ // if not return the global option, which is disabled by default
+ return NoColor
+}
+
+// Equals returns a boolean value indicating whether two colors are equal.
+func (c *Color) Equals(c2 *Color) bool {
+ if len(c.params) != len(c2.params) {
+ return false
+ }
+
+ for _, attr := range c.params {
+ if !c2.attrExists(attr) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (c *Color) attrExists(a Attribute) bool {
+ for _, attr := range c.params {
+ if attr == a {
+ return true
+ }
+ }
+
+ return false
+}
+
+func boolPtr(v bool) *bool {
+ return &v
+}
+
+func getCachedColor(p Attribute) *Color {
+ colorsCacheMu.Lock()
+ defer colorsCacheMu.Unlock()
+
+ c, ok := colorsCache[p]
+ if !ok {
+ c = New(p)
+ colorsCache[p] = c
+ }
+
+ return c
+}
+
+func colorPrint(format string, p Attribute, a ...interface{}) {
+ c := getCachedColor(p)
+
+ if !strings.HasSuffix(format, "\n") {
+ format += "\n"
+ }
+
+ if len(a) == 0 {
+ c.Print(format)
+ } else {
+ c.Printf(format, a...)
+ }
+}
+
+func colorString(format string, p Attribute, a ...interface{}) string {
+ c := getCachedColor(p)
+
+ if len(a) == 0 {
+ return c.SprintFunc()(format)
+ }
+
+ return c.SprintfFunc()(format, a...)
+}
+
+// Black is a convenient helper function to print with black foreground. A
+// newline is appended to format by default.
+func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) }
+
+// Red is a convenient helper function to print with red foreground. A
+// newline is appended to format by default.
+func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) }
+
+// Green is a convenient helper function to print with green foreground. A
+// newline is appended to format by default.
+func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) }
+
+// Yellow is a convenient helper function to print with yellow foreground.
+// A newline is appended to format by default.
+func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) }
+
+// Blue is a convenient helper function to print with blue foreground. A
+// newline is appended to format by default.
+func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) }
+
+// Magenta is a convenient helper function to print with magenta foreground.
+// A newline is appended to format by default.
+func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) }
+
+// Cyan is a convenient helper function to print with cyan foreground. A
+// newline is appended to format by default.
+func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) }
+
+// White is a convenient helper function to print with white foreground. A
+// newline is appended to format by default.
+func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) }
+
+// BlackString is a convenient helper function to return a string with black
+// foreground.
+func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) }
+
+// RedString is a convenient helper function to return a string with red
+// foreground.
+func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) }
+
+// GreenString is a convenient helper function to return a string with green
+// foreground.
+func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) }
+
+// YellowString is a convenient helper function to return a string with yellow
+// foreground.
+func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) }
+
+// BlueString is a convenient helper function to return a string with blue
+// foreground.
+func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) }
+
+// MagentaString is a convenient helper function to return a string with magenta
+// foreground.
+func MagentaString(format string, a ...interface{}) string {
+ return colorString(format, FgMagenta, a...)
+}
+
+// CyanString is a convenient helper function to return a string with cyan
+// foreground.
+func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) }
+
+// WhiteString is a convenient helper function to return a string with white
+// foreground.
+func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }
+
+// HiBlack is a convenient helper function to print with hi-intensity black foreground. A
+// newline is appended to format by default.
+func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) }
+
+// HiRed is a convenient helper function to print with hi-intensity red foreground. A
+// newline is appended to format by default.
+func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) }
+
+// HiGreen is a convenient helper function to print with hi-intensity green foreground. A
+// newline is appended to format by default.
+func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) }
+
+// HiYellow is a convenient helper function to print with hi-intensity yellow foreground.
+// A newline is appended to format by default.
+func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) }
+
+// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A
+// newline is appended to format by default.
+func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) }
+
+// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground.
+// A newline is appended to format by default.
+func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) }
+
+// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A
+// newline is appended to format by default.
+func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) }
+
+// HiWhite is a convenient helper function to print with hi-intensity white foreground. A
+// newline is appended to format by default.
+func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) }
+
+// HiBlackString is a convenient helper function to return a string with hi-intensity black
+// foreground.
+func HiBlackString(format string, a ...interface{}) string {
+ return colorString(format, FgHiBlack, a...)
+}
+
+// HiRedString is a convenient helper function to return a string with hi-intensity red
+// foreground.
+func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) }
+
+// HiGreenString is a convenient helper function to return a string with hi-intensity green
+// foreground.
+func HiGreenString(format string, a ...interface{}) string {
+ return colorString(format, FgHiGreen, a...)
+}
+
+// HiYellowString is a convenient helper function to return a string with hi-intensity yellow
+// foreground.
+func HiYellowString(format string, a ...interface{}) string {
+ return colorString(format, FgHiYellow, a...)
+}
+
+// HiBlueString is a convenient helper function to return a string with hi-intensity blue
+// foreground.
+func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
+
+// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
+// foreground.
+func HiMagentaString(format string, a ...interface{}) string {
+ return colorString(format, FgHiMagenta, a...)
+}
+
+// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
+// foreground.
+func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
+
+// HiWhiteString is a convenient helper function to return a string with hi-intensity white
+// foreground.
+func HiWhiteString(format string, a ...interface{}) string {
+ return colorString(format, FgHiWhite, a...)
+}
diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go
new file mode 100644
index 0000000..cf1e965
--- /dev/null
+++ b/vendor/github.com/fatih/color/doc.go
@@ -0,0 +1,133 @@
+/*
+Package color is an ANSI color package to output colorized or SGR defined
+output to the standard output. The API can be used in several ways; pick one
+that suits you.
+
+Use simple and default helper functions with predefined foreground colors:
+
+ color.Cyan("Prints text in cyan.")
+
+ // a newline will be appended automatically
+ color.Blue("Prints %s in blue.", "text")
+
+ // More default foreground colors..
+ color.Red("We have red")
+ color.Yellow("Yellow color too!")
+ color.Magenta("And many others ..")
+
+ // Hi-intensity colors
+ color.HiGreen("Bright green color.")
+ color.HiBlack("Bright black means gray..")
+ color.HiWhite("Shiny white color!")
+
+However, there are times when custom color mixes are required. Below are some
+examples that create custom color objects and use the print functions of each
+separate color object.
+
+ // Create a new color object
+ c := color.New(color.FgCyan).Add(color.Underline)
+ c.Println("Prints cyan text with an underline.")
+
+ // Or just add them to New()
+ d := color.New(color.FgCyan, color.Bold)
+ d.Printf("This prints bold cyan %s\n", "too!.")
+
+
+ // Mix up foreground and background colors, create new mixes!
+ red := color.New(color.FgRed)
+
+ boldRed := red.Add(color.Bold)
+ boldRed.Println("This will print text in bold red.")
+
+ whiteBackground := red.Add(color.BgWhite)
+ whiteBackground.Println("Red text with White background.")
+
+ // Use your own io.Writer output
+ color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+ blue := color.New(color.FgBlue)
+ blue.Fprint(myWriter, "This will print text in blue.")
+
+You can create PrintXxx functions to simplify even more:
+
+	// Create a custom print function for convenience
+ red := color.New(color.FgRed).PrintfFunc()
+ red("warning")
+ red("error: %s", err)
+
+ // Mix up multiple attributes
+ notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+ notice("don't forget this...")
+
+You can also use FprintXxx functions to pass your own io.Writer:
+
+ blue := color.New(FgBlue).FprintfFunc()
+ blue(myWriter, "important notice: %s", stars)
+
+ // Mix up with multiple attributes
+ success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+	success(myWriter, "don't forget this...")
+
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+ yellow := New(FgYellow).SprintFunc()
+ red := New(FgRed).SprintFunc()
+
+ fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+ info := New(FgWhite, BgGreen).SprintFunc()
+ fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, for the color.SprintXXX functions, users should use fmt.FprintXXX and
+set the output to color.Output:
+
+ fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+ info := New(FgWhite, BgGreen).SprintFunc()
+ fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using with existing code is possible. Just use the Set() method to set the
+standard output to the given parameters. That way a rewrite of existing code
+is not required.
+
+ // Use handy standard colors.
+ color.Set(color.FgYellow)
+
+ fmt.Println("Existing text will be now in Yellow")
+ fmt.Printf("This one %s\n", "too")
+
+ color.Unset() // don't forget to unset
+
+ // You can mix up parameters
+ color.Set(color.FgMagenta, color.Bold)
+ defer color.Unset() // use it in your function
+
+ fmt.Println("All text will be now bold magenta.")
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else). `Color` has support for
+disabling colors both globally and for a single color definition. For example,
+suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
+the color output with:
+
+ var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+ if *flagNoColor {
+ color.NoColor = true // disables colorized output
+ }
+
+It also has support for single color definitions (local). You can
+disable/enable color output on the fly:
+
+ c := color.New(color.FgCyan)
+ c.Println("Prints cyan text")
+
+ c.DisableColor()
+ c.Println("This is printed without any color")
+
+ c.EnableColor()
+ c.Println("This prints again cyan...")
+*/
+package color
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
new file mode 100644
index 0000000..fad8958
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig
@@ -0,0 +1,12 @@
+root = true
+
+[*.go]
+indent_style = tab
+indent_size = 4
+insert_final_newline = true
+
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+insert_final_newline = true
+trim_trailing_whitespace = true
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes
new file mode 100644
index 0000000..32f1001
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes
@@ -0,0 +1 @@
+go.sum linguist-generated
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
new file mode 100644
index 0000000..4cd0cba
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -0,0 +1,6 @@
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml
new file mode 100644
index 0000000..a9c3016
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml
@@ -0,0 +1,36 @@
+sudo: false
+language: go
+
+go:
+ - "stable"
+ - "1.11.x"
+ - "1.10.x"
+ - "1.9.x"
+
+matrix:
+ include:
+ - go: "stable"
+ env: GOLINT=true
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+
+before_install:
+ - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi
+
+script:
+ - go test --race ./...
+
+after_script:
+ - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
+ - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi
+ - go vet ./...
+
+os:
+ - linux
+ - osx
+ - windows
+
+notifications:
+ email: false
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
new file mode 100644
index 0000000..5ab5d41
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -0,0 +1,52 @@
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Aaron L
+Adrien Bustany
+Amit Krishnan
+Anmol Sethi
+Bjørn Erik Pedersen
+Bruno Bigras
+Caleb Spare
+Case Nelson
+Chris Howey
+Christoffer Buchholz
+Daniel Wagner-Hall
+Dave Cheney
+Evan Phoenix
+Francisco Souza
+Hari haran
+John C Barstow
+Kelvin Fo
+Ken-ichirou MATSUZAWA
+Matt Layher
+Nathan Youngman
+Nickolai Zeldovich
+Patrick
+Paul Hammond
+Pawel Knap
+Pieter Droogendijk
+Pursuit92
+Riku Voipio
+Rob Figueiredo
+Rodrigo Chiossi
+Slawek Ligus
+Soge Zhang
+Tiffany Jernigan
+Tilak Sharma
+Tom Payne
+Travis Cline
+Tudor Golubenco
+Vahe Khachikyan
+Yukang
+bronze1man
+debrando
+henrikedwards
+铁哥
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
new file mode 100644
index 0000000..be4d7ea
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -0,0 +1,317 @@
+# Changelog
+
+## v1.4.7 / 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
+## v1.4.2 / 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## v1.4.1 / 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## v1.4.0 / 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## v1.3.1 / 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## v1.3.0 / 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## v1.2.10 / 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## v1.2.9 / 2016-01-13
+
+* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## v1.2.8 / 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## v1.2.5 / 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## v1.2.1 / 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+ * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watch directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
new file mode 100644
index 0000000..828a60b
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+ * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+ * Update the CHANGELOG
+ * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Set up [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also set up just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
+* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE
new file mode 100644
index 0000000..e180c8f
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
new file mode 100644
index 0000000..b2629e5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -0,0 +1,130 @@
+# File system notifications for Go
+
+[GoDoc](https://godoc.org/github.com/fsnotify/fsnotify) [Go Report Card](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
+
+fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
+
+```console
+go get -u golang.org/x/sys/...
+```
+
+Cross platform: Windows, Linux, BSD and macOS.
+
+| Adapter | OS | Status |
+| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
+| inotify               | Linux 2.6.27 or later, Android\* | Supported ([build status](https://travis-ci.org/fsnotify/fsnotify)) |
+| kqueue                | BSD, macOS, iOS\*                | Supported ([build status](https://travis-ci.org/fsnotify/fsnotify)) |
+| ReadDirectoryChangesW | Windows                          | Supported ([build status](https://travis-ci.org/fsnotify/fsnotify)) |
+| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
+| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) |
+| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) |
+| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
+| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
+
+## API stability
+
+fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
+
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
+
+Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
+
+## Usage
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+func main() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+
+ done := make(chan bool)
+ go func() {
+ for {
+ select {
+ case event, ok := <-watcher.Events:
+ if !ok {
+ return
+ }
+ log.Println("event:", event)
+ if event.Op&fsnotify.Write == fsnotify.Write {
+ log.Println("modified file:", event.Name)
+ }
+ case err, ok := <-watcher.Errors:
+ if !ok {
+ return
+ }
+ log.Println("error:", err)
+ }
+ }
+ }()
+
+ err = watcher.Add("/tmp/foo")
+ if err != nil {
+ log.Fatal(err)
+ }
+ <-done
+}
+```
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## Example
+
+See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
+
+## FAQ
+
+**When a file is moved to another directory, is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
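+
+As a rough, hedged illustration (this helper is not part of the fsnotify API; it only uses the public Add method plus the standard library), one way to watch an existing tree is to walk it and add a watch per directory. Directories created after the walk still need to be added as they appear:
+
+```go
+// addRecursive walks root and adds a watch for every directory it finds.
+// Assumes the "os" and "path/filepath" imports in addition to fsnotify.
+func addRecursive(watcher *fsnotify.Watcher, root string) error {
+	return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if info.IsDir() {
+			return watcher.Add(path)
+		}
+		return nil
+	})
+}
+```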
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]).
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit; reaching this limit results in a "no space left on device" error (see the sketch below).
+* BSD / macOS: the sysctl variables "kern.maxfiles" and "kern.maxfilesperproc"; reaching these limits results in a "too many open files" error.
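+
+For illustration only, here is a minimal Go sketch (not part of fsnotify) that reads the current Linux limit; the /proc path and the parsing below are assumptions that only hold on Linux:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"strconv"
+	"strings"
+)
+
+func main() {
+	// On Linux, this file holds the per-user inotify watch limit.
+	data, err := ioutil.ReadFile("/proc/sys/fs/inotify/max_user_watches")
+	if err != nil {
+		log.Fatal(err)
+	}
+	limit, err := strconv.Atoi(strings.TrimSpace(string(data)))
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("max_user_watches:", limit)
+}
+```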
+
+**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
+
+fsnotify requires support from the underlying OS to work. The current NFS protocol does not provide network-level support for file notifications.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+
+## Related Projects
+
+* [notify](https://github.com/rjeczalik/notify)
+* [fsevents](https://github.com/fsnotify/fsevents)
+
diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go
new file mode 100644
index 0000000..ced39cb
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fen.go
@@ -0,0 +1,37 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package fsnotify
+
+import (
+ "errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	return nil, errors.New("FEN based watcher not yet supported for fsnotify")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 0000000..89cab04
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,68 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+ Name string // Relative path to the file or directory.
+ Op Op // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+ Create Op = 1 << iota
+ Write
+ Remove
+ Rename
+ Chmod
+)
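+
+// Callers typically test for a specific operation with a bitwise AND, for
+// example: event.Op&Write == Write.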
+
+func (op Op) String() string {
+ // Use a buffer for efficient string concatenation
+ var buffer bytes.Buffer
+
+ if op&Create == Create {
+ buffer.WriteString("|CREATE")
+ }
+ if op&Remove == Remove {
+ buffer.WriteString("|REMOVE")
+ }
+ if op&Write == Write {
+ buffer.WriteString("|WRITE")
+ }
+ if op&Rename == Rename {
+ buffer.WriteString("|RENAME")
+ }
+ if op&Chmod == Chmod {
+ buffer.WriteString("|CHMOD")
+ }
+ if buffer.Len() == 0 {
+ return ""
+ }
+ return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+ return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var (
+ ErrEventOverflow = errors.New("fsnotify queue overflow")
+)
diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod
new file mode 100644
index 0000000..ff11e13
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/go.mod
@@ -0,0 +1,5 @@
+module github.com/fsnotify/fsnotify
+
+go 1.13
+
+require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9
diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum
new file mode 100644
index 0000000..f60af98
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
new file mode 100644
index 0000000..d9fd1b8
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -0,0 +1,337 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ mu sync.Mutex // Map access
+ fd int
+ poller *fdPoller
+ watches map[string]*watch // Map of inotify watches (key: path)
+ paths map[int]string // Map of watched paths (key: watch descriptor)
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ doneResp chan struct{} // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ // Create inotify fd
+ fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+ if fd == -1 {
+ return nil, errno
+ }
+ // Create epoll
+ poller, err := newFdPoller(fd)
+ if err != nil {
+ unix.Close(fd)
+ return nil, err
+ }
+ w := &Watcher{
+ fd: fd,
+ poller: poller,
+ watches: make(map[string]*watch),
+ paths: make(map[int]string),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ doneResp: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed() {
+ return nil
+ }
+
+ // Send 'close' signal to goroutine, and set the Watcher to closed.
+ close(w.done)
+
+ // Wake up goroutine
+ w.poller.wake()
+
+ // Wait for goroutine to close
+ <-w.doneResp
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ name = filepath.Clean(name)
+ if w.isClosed() {
+ return errors.New("inotify instance already closed")
+ }
+
+ const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+ unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+ unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+ var flags uint32 = agnosticEvents
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watchEntry := w.watches[name]
+ if watchEntry != nil {
+ flags |= watchEntry.flags | unix.IN_MASK_ADD
+ }
+ wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+ if wd == -1 {
+ return errno
+ }
+
+ if watchEntry == nil {
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ } else {
+ watchEntry.wd = uint32(wd)
+ watchEntry.flags = flags
+ }
+
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+
+ // Fetch the watch.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watch, ok := w.watches[name]
+
+ // Remove it from inotify.
+ if !ok {
+ return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+ }
+
+	// We successfully removed the watch if InotifyRmWatch doesn't return an
+	// error; in either case we need to clean up our internal state to ensure it
+	// matches inotify's kernel state.
+ delete(w.paths, int(watch.wd))
+ delete(w.watches, name)
+
+	// inotify_rm_watch will return EINVAL if the file has been deleted;
+	// the inotify watch will already have been removed.
+	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
+	// by calling inotify_rm_watch() below. E.g. the readEvents() goroutine receives
+	// IN_IGNORED, so EINVAL means that the wd is being rm_watch()ed or its file was
+	// removed by another thread and we have not yet received the IN_IGNORED event.
+ success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+ if success == -1 {
+ // TODO: Perhaps it's not helpful to return an error here in every case.
+ // the only two possible errors are:
+ // EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+ // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+ // Watch descriptors are invalidated when they are removed explicitly or implicitly;
+ // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+ return errno
+ }
+
+ return nil
+}
+
+type watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+ var (
+ buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+ n int // Number of bytes read with read()
+ errno error // Syscall errno
+ ok bool // For poller.wait
+ )
+
+ defer close(w.doneResp)
+ defer close(w.Errors)
+ defer close(w.Events)
+ defer unix.Close(w.fd)
+ defer w.poller.close()
+
+ for {
+ // See if we have been closed.
+ if w.isClosed() {
+ return
+ }
+
+ ok, errno = w.poller.wait()
+ if errno != nil {
+ select {
+ case w.Errors <- errno:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ if !ok {
+ continue
+ }
+
+ n, errno = unix.Read(w.fd, buf[:])
+ // If a signal interrupted execution, see if we've been asked to close, and try again.
+ // http://man7.org/linux/man-pages/man7/signal.7.html :
+ // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+ if errno == unix.EINTR {
+ continue
+ }
+
+ // unix.Read might have been woken up by Close. If so, we're done.
+ if w.isClosed() {
+ return
+ }
+
+ if n < unix.SizeofInotifyEvent {
+ var err error
+ if n == 0 {
+				// EOF was received; this should really never happen.
+ err = io.EOF
+ } else if n < 0 {
+ // If an error occurred while reading.
+ err = errno
+ } else {
+ // Read was too short.
+ err = errors.New("notify: short read in readEvents()")
+ }
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ var offset uint32
+ // We don't know how many events we just read into the buffer
+ // While the offset points to at least one whole event...
+ for offset <= uint32(n-unix.SizeofInotifyEvent) {
+ // Point "raw" to the event in the buffer
+ raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+ mask := uint32(raw.Mask)
+ nameLen := uint32(raw.Len)
+
+ if mask&unix.IN_Q_OVERFLOW != 0 {
+ select {
+ case w.Errors <- ErrEventOverflow:
+ case <-w.done:
+ return
+ }
+ }
+
+ // If the event happened to the watched directory or the watched file, the kernel
+			// doesn't append the filename to the event, but we would like to always fill
+ // the "Name" field with a valid filename. We retrieve the path of the watch from
+ // the "paths" map.
+ w.mu.Lock()
+ name, ok := w.paths[int(raw.Wd)]
+ // IN_DELETE_SELF occurs when the file/directory being watched is removed.
+ // This is a sign to clean up the maps, otherwise we are no longer in sync
+ // with the inotify kernel state which has already deleted the watch
+ // automatically.
+ if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ delete(w.paths, int(raw.Wd))
+ delete(w.watches, name)
+ }
+ w.mu.Unlock()
+
+ if nameLen > 0 {
+ // Point "bytes" at the first byte of the filename
+ bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+ // The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ }
+
+ event := newEvent(name, mask)
+
+ // Send the events that are not ignored on the events channel
+ if !event.ignoreLinux(mask) {
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Move to the next event in the buffer
+ offset += unix.SizeofInotifyEvent + nameLen
+ }
+ }
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel, such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+ // Ignore anything the inotify API says to ignore
+ if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+ return true
+ }
+
+ // If the event is not a DELETE or RENAME, the file must exist.
+ // Otherwise the event is ignored.
+ // *Note*: this was put in place because it was seen that a MODIFY
+ // event was sent after the DELETE. This ignores that MODIFY and
+ // assumes a DELETE will come or has come if the file doesn't exist.
+ if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+ _, statErr := os.Lstat(e.Name)
+ return os.IsNotExist(statErr)
+ }
+ return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+ e.Op |= Create
+ }
+ if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+ e.Op |= Write
+ }
+ if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+ e.Op |= Rename
+ }
+ if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
new file mode 100644
index 0000000..b33f2b4
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+
+ "golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+ fd int // File descriptor (as returned by the inotify_init() syscall)
+ epfd int // Epoll file descriptor
+ pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+ poller := new(fdPoller)
+ poller.fd = fd
+ poller.epfd = -1
+ poller.pipe[0] = -1
+ poller.pipe[1] = -1
+ return poller
+}
+
+// newFdPoller creates a new poller for the given inotify file descriptor:
+// it sets up an epoll instance and a wake-up pipe.
+func newFdPoller(fd int) (*fdPoller, error) {
+ var errno error
+ poller := emptyPoller(fd)
+ defer func() {
+ if errno != nil {
+ poller.close()
+ }
+ }()
+ poller.fd = fd
+
+ // Create epoll fd
+ poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
+ if poller.epfd == -1 {
+ return nil, errno
+ }
+ // Create pipe; pipe[0] is the read end, pipe[1] the write end.
+ errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register inotify fd with epoll
+ event := unix.EpollEvent{
+ Fd: int32(poller.fd),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register pipe fd with epoll
+ event = unix.EpollEvent{
+ Fd: int32(poller.pipe[0]),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+ // 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+ // I don't know whether epoll_wait returns the number of events returned,
+ // or the total number of events ready.
+ // I decided to catch both by making the buffer one larger than the maximum.
+ events := make([]unix.EpollEvent, 7)
+ for {
+ n, errno := unix.EpollWait(poller.epfd, events, -1)
+ if n == -1 {
+ if errno == unix.EINTR {
+ continue
+ }
+ return false, errno
+ }
+ if n == 0 {
+ // If there are no events, try again.
+ continue
+ }
+ if n > 6 {
+ // This should never happen. More events were returned than should be possible.
+ return false, errors.New("epoll_wait returned more events than I know what to do with")
+ }
+ ready := events[:n]
+ epollhup := false
+ epollerr := false
+ epollin := false
+ for _, event := range ready {
+ if event.Fd == int32(poller.fd) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // This should not happen, but if it does, treat it as a wakeup.
+ epollhup = true
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the file descriptor, we should pretend
+ // something is ready to read, and let unix.Read pick up the error.
+ epollerr = true
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // There is data to read.
+ epollin = true
+ }
+ }
+ if event.Fd == int32(poller.pipe[0]) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // Write pipe descriptor was closed, by us. This means we're closing down the
+ // watcher, and we should wake up.
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the pipe file descriptor.
+ // This is an absolute mystery, and should never ever happen.
+					return false, errors.New("error on the pipe descriptor")
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // This is a regular wakeup, so we have to clear the buffer.
+ err := poller.clearWake()
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ }
+
+ if epollhup || epollerr || epollin {
+ return true, nil
+ }
+ return false, nil
+ }
+}
+
+// wake writes a single byte to the wake-up pipe to wake up the poller.
+func (poller *fdPoller) wake() error {
+ buf := make([]byte, 1)
+ n, errno := unix.Write(poller.pipe[1], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is full, poller will wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+ // You have to be woken up a LOT in order to get to 100!
+ buf := make([]byte, 100)
+ n, errno := unix.Read(poller.pipe[0], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is empty, someone else cleared our wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+ if poller.pipe[1] != -1 {
+ unix.Close(poller.pipe[1])
+ }
+ if poller.pipe[0] != -1 {
+ unix.Close(poller.pipe[0])
+ }
+ if poller.epfd != -1 {
+ unix.Close(poller.epfd)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
new file mode 100644
index 0000000..86e76a3
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -0,0 +1,521 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+
+ kq int // File descriptor (as returned by the kqueue() syscall).
+
+ mu sync.Mutex // Protects access to watcher data
+ watches map[string]int // Map of watched file descriptors (key: path).
+ externalWatches map[string]bool // Map of watches added by user of the library.
+ dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
+ paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
+ isClosed bool // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+ name string
+ isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ kq, err := kqueue()
+ if err != nil {
+ return nil, err
+ }
+
+ w := &Watcher{
+ kq: kq,
+ watches: make(map[string]int),
+ dirFlags: make(map[string]uint32),
+ paths: make(map[int]pathInfo),
+ fileExists: make(map[string]bool),
+ externalWatches: make(map[string]bool),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
+ w.isClosed = true
+
+ // copy paths to remove while locked
+ var pathsToRemove = make([]string, 0, len(w.watches))
+ for name := range w.watches {
+ pathsToRemove = append(pathsToRemove, name)
+ }
+ w.mu.Unlock()
+ // unlock before calling Remove, which also locks
+
+ for _, name := range pathsToRemove {
+ w.Remove(name)
+ }
+
+ // send a "quit" message to the reader goroutine
+ close(w.done)
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ w.mu.Lock()
+ w.externalWatches[name] = true
+ w.mu.Unlock()
+ _, err := w.addWatch(name, noteAllEvents)
+ return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+ w.mu.Lock()
+ watchfd, ok := w.watches[name]
+ w.mu.Unlock()
+ if !ok {
+ return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+ }
+
+ const registerRemove = unix.EV_DELETE
+ if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+ return err
+ }
+
+ unix.Close(watchfd)
+
+ w.mu.Lock()
+ isDir := w.paths[watchfd].isDir
+ delete(w.watches, name)
+ delete(w.paths, watchfd)
+ delete(w.dirFlags, name)
+ w.mu.Unlock()
+
+ // Find all watched paths that are in this directory that are not external.
+ if isDir {
+ var pathsToRemove []string
+ w.mu.Lock()
+ for _, path := range w.paths {
+ wdir, _ := filepath.Split(path.name)
+ if filepath.Clean(wdir) == name {
+ if !w.externalWatches[path.name] {
+ pathsToRemove = append(pathsToRemove, path.name)
+ }
+ }
+ }
+ w.mu.Unlock()
+ for _, name := range pathsToRemove {
+			// Since these are internal, there is not much sense in propagating the error
+			// to the user, as that would just confuse them with an error about
+			// a path they did not explicitly watch themselves.
+ w.Remove(name)
+ }
+ }
+
+ return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+ var isDir bool
+ // Make ./name and name equivalent
+ name = filepath.Clean(name)
+
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return "", errors.New("kevent instance already closed")
+ }
+ watchfd, alreadyWatching := w.watches[name]
+ // We already have a watch, but we can still override flags.
+ if alreadyWatching {
+ isDir = w.paths[watchfd].isDir
+ }
+ w.mu.Unlock()
+
+ if !alreadyWatching {
+ fi, err := os.Lstat(name)
+ if err != nil {
+ return "", err
+ }
+
+ // Don't watch sockets.
+ if fi.Mode()&os.ModeSocket == os.ModeSocket {
+ return "", nil
+ }
+
+ // Don't watch named pipes.
+ if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+ return "", nil
+ }
+
+ // Follow Symlinks
+ // Unfortunately, Linux can add bogus symlinks to watch list without
+ // issue, and Windows can't do symlinks period (AFAIK). To maintain
+ // consistency, we will act like everything is fine. There will simply
+ // be no file events for broken symlinks.
+ // Hence the returns of nil on errors.
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ name, err = filepath.EvalSymlinks(name)
+ if err != nil {
+ return "", nil
+ }
+
+ w.mu.Lock()
+ _, alreadyWatching = w.watches[name]
+ w.mu.Unlock()
+
+ if alreadyWatching {
+ return name, nil
+ }
+
+ fi, err = os.Lstat(name)
+ if err != nil {
+ return "", nil
+ }
+ }
+
+ watchfd, err = unix.Open(name, openMode, 0700)
+ if watchfd == -1 {
+ return "", err
+ }
+
+ isDir = fi.IsDir()
+ }
+
+ const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+ if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+ unix.Close(watchfd)
+ return "", err
+ }
+
+ if !alreadyWatching {
+ w.mu.Lock()
+ w.watches[name] = watchfd
+ w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+ w.mu.Unlock()
+ }
+
+ if isDir {
+ // Watch the directory if it has not been watched before,
+ // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ w.mu.Lock()
+
+ watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+ (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+ // Store flags so this watch can be updated later
+ w.dirFlags[name] = flags
+ w.mu.Unlock()
+
+ if watchDir {
+ if err := w.watchDirectoryFiles(name); err != nil {
+ return "", err
+ }
+ }
+ }
+ return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+ eventBuffer := make([]unix.Kevent_t, 10)
+
+loop:
+ for {
+ // See if there is a message on the "done" channel
+ select {
+ case <-w.done:
+ break loop
+ default:
+ }
+
+ // Get new events
+ kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+ // EINTR is okay, the syscall was interrupted before timeout expired.
+ if err != nil && err != unix.EINTR {
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ break loop
+ }
+ continue
+ }
+
+ // Flush the events we received to the Events channel
+ for len(kevents) > 0 {
+ kevent := &kevents[0]
+ watchfd := int(kevent.Ident)
+ mask := uint32(kevent.Fflags)
+ w.mu.Lock()
+ path := w.paths[watchfd]
+ w.mu.Unlock()
+ event := newEvent(path.name, mask)
+
+ if path.isDir && !(event.Op&Remove == Remove) {
+ // Double check to make sure the directory exists. This can happen when
+				// we do an rm -fr on a recursively watched folder and receive a
+				// modification event first, but the folder has already been deleted;
+				// the delete event arrives later.
+ if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark it as a delete event
+ event.Op |= Remove
+ }
+ }
+
+ if event.Op&Rename == Rename || event.Op&Remove == Remove {
+ w.Remove(event.Name)
+ w.mu.Lock()
+ delete(w.fileExists, event.Name)
+ w.mu.Unlock()
+ }
+
+ if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+ w.sendDirectoryChangeEvents(event.Name)
+ } else {
+ // Send the event on the Events channel.
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ break loop
+ }
+ }
+
+ if event.Op&Remove == Remove {
+ // Look for a file that may have overwritten this.
+ // For example, mv f1 f2 will delete f2, then create f2.
+ if path.isDir {
+ fileDir := filepath.Clean(event.Name)
+ w.mu.Lock()
+ _, found := w.watches[fileDir]
+ w.mu.Unlock()
+ if found {
+						// make sure the directory exists before we watch for changes. When we
+						// do a recursive watch and perform rm -fr, the parent directory might
+						// have gone missing; ignore the missing directory and let the
+						// upcoming delete event remove the watch from the parent directory.
+ if _, err := os.Lstat(fileDir); err == nil {
+ w.sendDirectoryChangeEvents(fileDir)
+ }
+ }
+ } else {
+ filePath := filepath.Clean(event.Name)
+ if fileInfo, err := os.Lstat(filePath); err == nil {
+ w.sendFileCreatedEventIfNew(filePath, fileInfo)
+ }
+ }
+ }
+
+ // Move to next event
+ kevents = kevents[1:]
+ }
+ }
+
+ // cleanup
+ err := unix.Close(w.kq)
+ if err != nil {
+		// The only way the previous loop breaks is if w.done was closed, so we need a non-blocking send to w.Errors.
+ select {
+ case w.Errors <- err:
+ default:
+ }
+ }
+ close(w.Events)
+ close(w.Errors)
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+ e.Op |= Write
+ }
+ if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+ e.Op |= Rename
+ }
+ if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+func newCreateEvent(name string) Event {
+ return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles watches the files inside a directory, to mimic inotify behavior when adding a watch on a directory.
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return err
+ }
+
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+ }
+
+ return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Search for new files
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+ if err != nil {
+ return
+ }
+ }
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+ w.mu.Lock()
+ _, doesExist := w.fileExists[filePath]
+ w.mu.Unlock()
+ if !doesExist {
+ // Send create event
+ select {
+ case w.Events <- newCreateEvent(filePath):
+ case <-w.done:
+ return
+ }
+ }
+
+ // like watchDirectoryFiles (but without doing another ReadDir)
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+
+ return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+ if fileInfo.IsDir() {
+		// Mimic Linux by providing delete events for subdirectories,
+		// but preserve the flags used if the subdirectory is already being watched.
+ w.mu.Lock()
+ flags := w.dirFlags[name]
+ w.mu.Unlock()
+
+ flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+ return w.addWatch(name, flags)
+ }
+
+ // watch file to mimic Linux inotify
+ return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+ kq, err = unix.Kqueue()
+ if kq == -1 {
+ return kq, err
+ }
+ return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+ changes := make([]unix.Kevent_t, len(fds))
+
+ for i, fd := range fds {
+ // SetKevent converts int to the platform-specific types:
+ unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+ changes[i].Fflags = fflags
+ }
+
+ // register the events
+ success, err := unix.Kevent(kq, changes, nil, nil)
+ if success == -1 {
+ return err
+ }
+ return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+ n, err := unix.Kevent(kq, nil, events, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) unix.Timespec {
+ return unix.NsecToTimespec(d.Nanoseconds())
+}
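
For orientation, the Watcher implemented above is consumed through the same API on every platform backend (kqueue here, inotify on Linux, completion ports on Windows): NewWatcher, Add, Remove, Close, plus the Events and Errors channels. The sketch below is illustrative only and is not part of the vendored sources; the watched path is a placeholder.

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Create a watcher; the platform-specific backend is chosen at build time.
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch a single directory (non-recursively, as documented above).
	if err := w.Add("/tmp/watched"); err != nil { // placeholder path
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-w.Events:
			if !ok {
				return // Events is closed once Close() has been called
			}
			// Op is a bit mask; test individual bits.
			if event.Op&fsnotify.Write == fsnotify.Write {
				log.Println("modified:", event.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```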
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
new file mode 100644
index 0000000..2306c46
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
new file mode 100644
index 0000000..870c4d6
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 0000000..09436f3
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ isClosed bool // Set to true when Close() is first called
+ mu sync.Mutex // Map access
+ port syscall.Handle // Handle to completion port
+ watches watchMap // Map of watches (key: i-number)
+ input chan *input // Inputs to the reader are sent on this channel
+ quit chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ w := &Watcher{
+ port: port,
+ watches: make(watchMap),
+ input: make(chan *input, 1),
+ Events: make(chan Event, 50),
+ Errors: make(chan error),
+ quit: make(chan chan<- error, 1),
+ }
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed {
+ return nil
+ }
+ w.isClosed = true
+
+ // Send "quit" message to the reader goroutine
+ ch := make(chan error)
+ w.quit <- ch
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ if w.isClosed {
+ return errors.New("watcher already closed")
+ }
+ in := &input{
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sysFSALLEVENTS,
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ in := &input{
+ op: opRemoveWatch,
+ path: filepath.Clean(name),
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+const (
+ // Options for AddWatch
+ sysFSONESHOT = 0x80000000
+ sysFSONLYDIR = 0x1000000
+
+ // Events
+ sysFSACCESS = 0x1
+ sysFSALLEVENTS = 0xfff
+ sysFSATTRIB = 0x4
+ sysFSCLOSE = 0x18
+ sysFSCREATE = 0x100
+ sysFSDELETE = 0x200
+ sysFSDELETESELF = 0x400
+ sysFSMODIFY = 0x2
+ sysFSMOVE = 0xc0
+ sysFSMOVEDFROM = 0x40
+ sysFSMOVEDTO = 0x80
+ sysFSMOVESELF = 0x800
+
+ // Special events
+ sysFSIGNORED = 0x8000
+ sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+ e.Op |= Create
+ }
+ if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+ e.Op |= Remove
+ }
+ if mask&sysFSMODIFY == sysFSMODIFY {
+ e.Op |= Write
+ }
+ if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+ e.Op |= Rename
+ }
+ if mask&sysFSATTRIB == sysFSATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+const (
+ opAddWatch = iota
+ opRemoveWatch
+)
+
+const (
+ provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+ op int
+ path string
+ flags uint32
+ reply chan error
+}
+
+type inode struct {
+ handle syscall.Handle
+ volume uint32
+ index uint64
+}
+
+type watch struct {
+ ov syscall.Overlapped
+ ino *inode // i-number
+ path string // Directory path
+ mask uint64 // Directory itself is being watched with these notify flags
+ names map[string]uint64 // Map of names being watched and their notify flags
+ rename string // Remembers the old name while renaming a file
+ buf [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+ e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+ if e != nil {
+ return os.NewSyscallError("PostQueuedCompletionStatus", e)
+ }
+ return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+ attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+ if e != nil {
+ return "", os.NewSyscallError("GetFileAttributes", e)
+ }
+ if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ dir = pathname
+ } else {
+ dir, _ = filepath.Split(pathname)
+ dir = filepath.Clean(dir)
+ }
+ return
+}
+
+func getIno(path string) (ino *inode, err error) {
+ h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+ syscall.FILE_LIST_DIRECTORY,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil, syscall.OPEN_EXISTING,
+ syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateFile", e)
+ }
+ var fi syscall.ByHandleFileInformation
+ if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+ syscall.CloseHandle(h)
+ return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+ }
+ ino = &inode{
+ handle: h,
+ volume: fi.VolumeSerialNumber,
+ index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+ }
+ return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+ if i := m[ino.volume]; i != nil {
+ return i[ino.index]
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+ i := m[ino.volume]
+ if i == nil {
+ i = make(indexMap)
+ m[ino.volume] = i
+ }
+ i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ if flags&sysFSONLYDIR != 0 && pathname != dir {
+ return nil
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watchEntry := w.watches.get(ino)
+ w.mu.Unlock()
+ if watchEntry == nil {
+ if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+ syscall.CloseHandle(ino.handle)
+ return os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ watchEntry = &watch{
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ }
+ w.mu.Lock()
+ w.watches.set(ino, watchEntry)
+ w.mu.Unlock()
+ flags |= provisional
+ } else {
+ syscall.CloseHandle(ino.handle)
+ }
+ if pathname == dir {
+ watchEntry.mask |= flags
+ } else {
+ watchEntry.names[filepath.Base(pathname)] |= flags
+ }
+ if err = w.startRead(watchEntry); err != nil {
+ return err
+ }
+ if pathname == dir {
+ watchEntry.mask &= ^provisional
+ } else {
+ watchEntry.names[filepath.Base(pathname)] &= ^provisional
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watch := w.watches.get(ino)
+ w.mu.Unlock()
+ if watch == nil {
+ return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+ }
+ if pathname == dir {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ watch.mask = 0
+ } else {
+ name := filepath.Base(pathname)
+ w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+ for name, mask := range watch.names {
+ if mask&provisional == 0 {
+ w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+ }
+ delete(watch.names, name)
+ }
+ if watch.mask != 0 {
+ if watch.mask&provisional == 0 {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ }
+ watch.mask = 0
+ }
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+ if e := syscall.CancelIo(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CancelIo", e)
+ w.deleteWatch(watch)
+ }
+ mask := toWindowsFlags(watch.mask)
+ for _, m := range watch.names {
+ mask |= toWindowsFlags(m)
+ }
+ if mask == 0 {
+ if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CloseHandle", e)
+ }
+ w.mu.Lock()
+ delete(w.watches[watch.ino.volume], watch.ino.index)
+ w.mu.Unlock()
+ return nil
+ }
+ e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+ uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+ if e != nil {
+ err := os.NewSyscallError("ReadDirectoryChanges", e)
+ if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+ // Watched directory was probably removed
+ if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ err = nil
+ }
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ return err
+ }
+ return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+ var (
+ n, key uint32
+ ov *syscall.Overlapped
+ )
+ runtime.LockOSThread()
+
+ for {
+ e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+ watch := (*watch)(unsafe.Pointer(ov))
+
+ if watch == nil {
+ select {
+ case ch := <-w.quit:
+ w.mu.Lock()
+ var indexes []indexMap
+ for _, index := range w.watches {
+ indexes = append(indexes, index)
+ }
+ w.mu.Unlock()
+ for _, index := range indexes {
+ for _, watch := range index {
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ }
+ }
+ var err error
+ if e := syscall.CloseHandle(w.port); e != nil {
+ err = os.NewSyscallError("CloseHandle", e)
+ }
+ close(w.Events)
+ close(w.Errors)
+ ch <- err
+ return
+ case in := <-w.input:
+ switch in.op {
+ case opAddWatch:
+ in.reply <- w.addWatch(in.path, uint64(in.flags))
+ case opRemoveWatch:
+ in.reply <- w.remWatch(in.path)
+ }
+ default:
+ }
+ continue
+ }
+
+ switch e {
+ case syscall.ERROR_MORE_DATA:
+ if watch == nil {
+ w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+ } else {
+ // The i/o succeeded but the buffer is full.
+ // In theory we should be building up a full packet.
+ // In practice we can get away with just carrying on.
+ n = uint32(unsafe.Sizeof(watch.buf))
+ }
+ case syscall.ERROR_ACCESS_DENIED:
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ continue
+ case syscall.ERROR_OPERATION_ABORTED:
+ // CancelIo was called on this handle
+ continue
+ default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+ continue
+ case nil:
+ }
+
+ var offset uint32
+ for {
+ if n == 0 {
+ w.Events <- newEvent("", sysFSQOVERFLOW)
+ w.Errors <- errors.New("short read in readEvents()")
+ break
+ }
+
+ // Point "raw" to the event in the buffer
+ raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+ buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+ name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+ fullname := filepath.Join(watch.path, name)
+
+ var mask uint64
+ switch raw.Action {
+ case syscall.FILE_ACTION_REMOVED:
+ mask = sysFSDELETESELF
+ case syscall.FILE_ACTION_MODIFIED:
+ mask = sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ watch.rename = name
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ if watch.names[watch.rename] != 0 {
+ watch.names[name] |= watch.names[watch.rename]
+ delete(watch.names, watch.rename)
+ mask = sysFSMOVESELF
+ }
+ }
+
+ sendNameEvent := func() {
+ if w.sendEvent(fullname, watch.names[name]&mask) {
+ if watch.names[name]&sysFSONESHOT != 0 {
+ delete(watch.names, name)
+ }
+ }
+ }
+ if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ sendNameEvent()
+ }
+ if raw.Action == syscall.FILE_ACTION_REMOVED {
+ w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ fullname = filepath.Join(watch.path, watch.rename)
+ sendNameEvent()
+ }
+
+ // Move to the next event in the buffer
+ if raw.NextEntryOffset == 0 {
+ break
+ }
+ offset += raw.NextEntryOffset
+
+ // Error!
+ if offset >= n {
+ w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
+ break
+ }
+ }
+
+ if err := w.startRead(watch); err != nil {
+ w.Errors <- err
+ }
+ }
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+ if mask == 0 {
+ return false
+ }
+ event := newEvent(name, uint32(mask))
+ select {
+ case ch := <-w.quit:
+ w.quit <- ch
+ case w.Events <- event:
+ }
+ return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+ var m uint32
+ if mask&sysFSACCESS != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+ }
+ if mask&sysFSMODIFY != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+ }
+ if mask&sysFSATTRIB != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+ }
+ if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+ }
+ return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+ switch action {
+ case syscall.FILE_ACTION_ADDED:
+ return sysFSCREATE
+ case syscall.FILE_ACTION_REMOVED:
+ return sysFSDELETE
+ case syscall.FILE_ACTION_MODIFIED:
+ return sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ return sysFSMOVEDFROM
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ return sysFSMOVEDTO
+ }
+ return 0
+}
diff --git a/vendor/github.com/gardener/etcd-druid/LICENSE.md b/vendor/github.com/gardener/etcd-druid/LICENSE.md
new file mode 100644
index 0000000..14dd12f
--- /dev/null
+++ b/vendor/github.com/gardener/etcd-druid/LICENSE.md
@@ -0,0 +1,289 @@
+SPDX short identifier: Apache-2.0
+
+```
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+```
+
+## APIs
+
+This project may include APIs to SAP or third party products or services. The use of these APIs,
+products and services may be subject to additional agreements. In no event shall the application
+of the Apache Software License, v.2 to this project grant any rights in or to these APIs, products
+or services that would alter, expand, be inconsistent with, or supersede any terms of these additional
+agreements. API means application programming interfaces, as well as their respective specifications
+and implementing code that allows other software products to communicate with or call on SAP or
+third party products or services (for example, SAP Enterprise Services, BAPIs, Idocs, RFCs and
+ABAP calls or other user exits) and may be made available through SAP or third party products,
+SDKs, documentation or other media.
+
+## Subcomponents
+
+This project includes the following subcomponents that are subject to separate license terms.
+Your use of these subcomponents is subject to the separate license terms applicable to
+each subcomponent.
+
+Schema of the external API types that are served by the Kubernetes API server
+https://github.com/kubernetes/api
+Copyright 2017 The Kubernetes Authors
+Apache 2 license (https://github.com/kubernetes/api/blob/master/LICENSE)
+
+Scheme, typing, encoding, decoding, and conversion packages for Kubernetes and Kubernetes-like API objects.
+https://github.com/kubernetes/apimachinery
+Copyright 2014 The Kubernetes Authors
+Apache 2 license (https://github.com/kubernetes/apimachinery/blob/master/LICENSE)
+
+Logrus.
+https://github.com/sirupsen/logrus
+Copyright (c) 2014 Simon Eskildsen
+MIT license (https://github.com/sirupsen/logrus/blob/master/LICENSE)
+
+Client-go
+https://github.com/kubernetes/client-go
+Copyright 2017 The Kubernetes Authors
+Apache 2 license (https://github.com/kubernetes/client-go/blob/master/LICENSE)
+
+Etcd
+https://github.com/coreos/etcd
+Copyright 2017 The etcd Authors
+Apache 2 license (https://github.com/coreos/etcd/blob/master/LICENSE)
+
+Kubebuilder
+Copyright 2017 The Kubernetes Authors
+Apache 2 license (https://github.com/kubernetes-sigs/kubebuilder/blob/master/LICENSE)
+
+Helm
+https://git.k8s.io/helm
+Copyright 2017 The Kubernetes Authors
+Apache 2 license (https://git.k8s.io/helm/LICENSE)
+
+------
+
+## MIT License
+
+SPDX short identifier: MIT
+
+```
+ The MIT License (MIT)
+
+ Copyright (c)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+```
+
+------
diff --git a/vendor/github.com/gardener/etcd-druid/NOTICE.md b/vendor/github.com/gardener/etcd-druid/NOTICE.md
new file mode 100644
index 0000000..c7037d2
--- /dev/null
+++ b/vendor/github.com/gardener/etcd-druid/NOTICE.md
@@ -0,0 +1,3 @@
+## etcd-druid
+
+Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
diff --git a/vendor/github.com/gardener/etcd-druid/api/v1alpha1/etcd_types.go b/vendor/github.com/gardener/etcd-druid/api/v1alpha1/etcd_types.go
new file mode 100644
index 0000000..138b26b
--- /dev/null
+++ b/vendor/github.com/gardener/etcd-druid/api/v1alpha1/etcd_types.go
@@ -0,0 +1,324 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ // GarbageCollectionPolicyExponential defines the exponential policy for garbage collecting old backups
+ GarbageCollectionPolicyExponential = "Exponential"
+ // GarbageCollectionPolicyLimitBased defines the limit based policy for garbage collecting old backups
+ GarbageCollectionPolicyLimitBased = "LimitBased"
+
+ // Basic is a constant for metrics level basic.
+ Basic MetricsLevel = "basic"
+ // Extensive is a constant for metrics level extensive.
+ Extensive MetricsLevel = "extensive"
+)
+
+// MetricsLevel defines the level 'basic' or 'extensive'.
+// +kubebuilder:validation:Enum=basic;extensive
+type MetricsLevel string
+
+// GarbageCollectionPolicy defines the type of policy for snapshot garbage collection.
+// +kubebuilder:validation:Enum=Exponential;LimitBased
+type GarbageCollectionPolicy string
+
+// StorageProvider defines the type of object store provider for storing backups.
+type StorageProvider string
+
+// StoreSpec defines parameters related to ObjectStore persisting backups
+type StoreSpec struct {
+ // +optional
+ Container *string `json:"container,omitempty"`
+ // +required
+ Prefix string `json:"prefix"`
+ // +optional
+ Provider *StorageProvider `json:"provider,omitempty"`
+ // +optional
+ SecretRef *corev1.SecretReference `json:"secretRef,omitempty"`
+}
+
+// TLSConfig holds the TLS configuration details.
+type TLSConfig struct {
+ // +required
+ ServerTLSSecretRef corev1.SecretReference `json:"serverTLSSecretRef"`
+ // +required
+ ClientTLSSecretRef corev1.SecretReference `json:"clientTLSSecretRef"`
+ // +required
+ TLSCASecretRef corev1.SecretReference `json:"tlsCASecretRef"`
+}
+
+// BackupSpec defines parameters associated with the full and delta snapshots of etcd
+type BackupSpec struct {
+	// Port defines the port on which the etcd-backup-restore server will be exposed.
+ // +optional
+ Port *int `json:"port,omitempty"`
+ // +optional
+ TLS *TLSConfig `json:"tls,omitempty"`
+ // Image defines the etcd container image and tag
+ // +optional
+ Image *string `json:"image,omitempty"`
+ // Store defines the specification of object store provider for storing backups.
+ // +optional
+ Store *StoreSpec `json:"store,omitempty"`
+ // Resources defines the compute Resources required by backup-restore container.
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+ // +optional
+ Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
+ // FullSnapshotSchedule defines the cron standard schedule for full snapshots.
+ // +optional
+ FullSnapshotSchedule *string `json:"fullSnapshotSchedule,omitempty"`
+ // GarbageCollectionPolicy defines the policy for garbage collecting old backups
+ // +optional
+ GarbageCollectionPolicy *GarbageCollectionPolicy `json:"garbageCollectionPolicy,omitempty"`
+ // GarbageCollectionPeriod defines the period for garbage collecting old backups
+ // +optional
+ GarbageCollectionPeriod *metav1.Duration `json:"garbageCollectionPeriod,omitempty"`
+ // DeltaSnapshotPeriod defines the period after which delta snapshots will be taken
+ // +optional
+ DeltaSnapshotPeriod *metav1.Duration `json:"deltaSnapshotPeriod,omitempty"`
+ // DeltaSnapshotMemoryLimit defines the memory limit after which delta snapshots will be taken
+ // +optional
+ DeltaSnapshotMemoryLimit *resource.Quantity `json:"deltaSnapshotMemoryLimit,omitempty"`
+}
+
+// EtcdConfig defines parameters associated with the deployed etcd
+type EtcdConfig struct {
+ // Quota defines the etcd DB quota.
+ // +optional
+ Quota *resource.Quantity `json:"quota,omitempty"`
+ // DefragmentationSchedule defines the cron standard schedule for defragmentation of etcd.
+ // +optional
+ DefragmentationSchedule *string `json:"defragmentationSchedule,omitempty"`
+ // +optional
+ ServerPort *int `json:"serverPort,omitempty"`
+ // +optional
+ ClientPort *int `json:"clientPort,omitempty"`
+ // Image defines the etcd container image and tag
+ // +optional
+ Image *string `json:"image,omitempty"`
+ // +optional
+ AuthSecretRef *corev1.SecretReference `json:"authSecretRef,omitempty"`
+ // Metrics defines the level of detail for exported metrics of etcd, specify 'extensive' to include histogram metrics.
+ // +optional
+ Metrics MetricsLevel `json:"metrics,omitempty"`
+ // Resources defines the compute Resources required by etcd container.
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+ // +optional
+ Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
+ // +optional
+ TLS *TLSConfig `json:"tls,omitempty"`
+}
+
+// EtcdSpec defines the desired state of Etcd
+type EtcdSpec struct {
+ // selector is a label query over pods that should match the replica count.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelector `json:"selector"`
+ // +required
+ Labels map[string]string `json:"labels"`
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty"`
+ // +required
+ Etcd EtcdConfig `json:"etcd"`
+ // +required
+ Backup BackupSpec `json:"backup"`
+ // +required
+ Replicas int `json:"replicas"`
+ // PriorityClassName is the name of a priority class that shall be used for the etcd pods.
+ // +optional
+ PriorityClassName *string `json:"priorityClassName,omitempty"`
+ // StorageClass defines the name of the StorageClass required by the claim.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ // +optional
+ StorageClass *string `json:"storageClass,omitempty"`
+ // StorageCapacity defines the size of persistent volume.
+ // +optional
+ StorageCapacity *resource.Quantity `json:"storageCapacity,omitempty"`
+ // VolumeClaimTemplate defines the volume claim template to be created
+ // +optional
+ VolumeClaimTemplate *string `json:"volumeClaimTemplate,omitempty"`
+}
+
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+type CrossVersionObjectReference struct {
+ // Kind of the referent
+ // +required
+ Kind string `json:"kind,omitempty"`
+ // Name of the referent
+ // +required
+ Name string `json:"name,omitempty"`
+ // API version of the referent
+ // +optional
+ APIVersion string `json:"apiVersion,omitempty"`
+}
+
+// ConditionStatus is the status of a condition.
+type ConditionStatus string
+
+// ConditionType is a string alias.
+type ConditionType string
+
+const (
+ // ConditionAvailable is a condition type for indicating availability.
+ ConditionAvailable ConditionType = "Available"
+
+ // ConditionTrue means a resource is in the condition.
+ ConditionTrue ConditionStatus = "True"
+ // ConditionFalse means a resource is not in the condition.
+ ConditionFalse ConditionStatus = "False"
+ // ConditionUnknown means Gardener can't decide if a resource is in the condition or not.
+ ConditionUnknown ConditionStatus = "Unknown"
+ // ConditionProgressing means the condition was seen true, failed but stayed within a predefined failure threshold.
+ // In the future, we could add other intermediate conditions, e.g. ConditionDegraded.
+ ConditionProgressing ConditionStatus = "Progressing"
+
+ // ConditionCheckError is a constant for a reason in condition.
+ ConditionCheckError = "ConditionCheckError"
+)
+
+// Condition holds the information about the state of a resource.
+type Condition struct {
+ // Type of the Etcd condition.
+ Type ConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status ConditionStatus `json:"status,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+ // Last time the condition was updated.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message string `json:"message,omitempty"`
+}
+
+// EndpointStatus is the status of an endpoint.
+type EndpointStatus string
+
+// LastOperationType is a string alias.
+type LastOperationType string
+
+const (
+ // LastOperationTypeCreate indicates a 'create' operation.
+ LastOperationTypeCreate LastOperationType = "Create"
+ // LastOperationTypeReconcile indicates a 'reconcile' operation.
+ LastOperationTypeReconcile LastOperationType = "Reconcile"
+ // LastOperationTypeDelete indicates a 'delete' operation.
+ LastOperationTypeDelete LastOperationType = "Delete"
+)
+
+// LastOperationState is a string alias.
+type LastOperationState string
+
+const (
+ // LastOperationStateProcessing indicates that an operation is ongoing.
+ LastOperationStateProcessing LastOperationState = "Processing"
+ // LastOperationStateSucceeded indicates that an operation has completed successfully.
+ LastOperationStateSucceeded LastOperationState = "Succeeded"
+ // LastOperationStateError indicates that an operation is completed with errors and will be retried.
+ LastOperationStateError LastOperationState = "Error"
+ // LastOperationStateFailed indicates that an operation is completed with errors and won't be retried.
+ LastOperationStateFailed LastOperationState = "Failed"
+ // LastOperationStatePending indicates that an operation cannot be done now, but will be tried in future.
+ LastOperationStatePending LastOperationState = "Pending"
+ // LastOperationStateAborted indicates that an operation has been aborted.
+ LastOperationStateAborted LastOperationState = "Aborted"
+)
+
+// LastOperation indicates the type and the state of the last operation, along with a description
+// message and a progress indicator.
+type LastOperation struct {
+ // A human readable message indicating details about the last operation.
+ Description string `json:"description,omitempty"`
+ // Last time the operation state transitioned from one to another.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
+ // The progress in percentage (0-100) of the last operation.
+ Progress int `json:"progress,omitempty"`
+ // Status of the last operation, one of Aborted, Processing, Succeeded, Error, Failed.
+ State LastOperationState `json:"state,omitempty"`
+ // Type of the last operation, one of Create, Reconcile, Delete.
+ Type LastOperationType `json:"type,omitempty"`
+}
+
+// EtcdStatus defines the observed state of Etcd
+type EtcdStatus struct {
+ // ObservedGeneration is the most recent generation observed for this resource.
+ // +optional
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // +optional
+ Etcd CrossVersionObjectReference `json:"etcd,omitempty"`
+ // +optional
+ Conditions []Condition `json:"conditions,omitempty"`
+ // +optional
+ CurrentReplicas int32 `json:"currentReplicas,omitempty"`
+ // +optional
+ ServiceName *string `json:"serviceName,omitempty"`
+ // +optional
+ LastError *string `json:"lastError,omitempty"`
+ // +optional
+ Replicas int32 `json:"replicas,omitempty"`
+ // +optional
+ ReadyReplicas int32 `json:"readyReplicas,omitempty"`
+ // +optional
+ Ready *bool `json:"ready,omitempty"`
+ // +optional
+ UpdatedReplicas int32 `json:"updatedReplicas,omitempty"`
+ // selector is a label query over pods that should match the replica count.
+ // It must match the pod template's labels.
+ // +optional
+ LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
+ //LastOperation LastOperation `json:"lastOperation,omitempty"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.ready`
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
+// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector
+
+// Etcd is the Schema for the etcds API
+type Etcd struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec EtcdSpec `json:"spec,omitempty"`
+ Status EtcdStatus `json:"status,omitempty"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// +kubebuilder:object:root=true
+
+// EtcdList contains a list of Etcd
+type EtcdList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Etcd `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Etcd{}, &EtcdList{})
+}
diff --git a/vendor/github.com/gardener/etcd-druid/api/v1alpha1/groupversion_info.go b/vendor/github.com/gardener/etcd-druid/api/v1alpha1/groupversion_info.go
new file mode 100644
index 0000000..9ac143f
--- /dev/null
+++ b/vendor/github.com/gardener/etcd-druid/api/v1alpha1/groupversion_info.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v1alpha1 contains API Schema definitions for the druid v1alpha1 API group
+// +kubebuilder:object:generate=true
+// +groupName=druid.gardener.cloud
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "druid.gardener.cloud", Version: "v1alpha1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
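
For orientation, the SchemeBuilder and AddToScheme declared above are how consumers register the druid.gardener.cloud/v1alpha1 types. The sketch below is illustrative only and not part of the vendored sources; object names and label values are placeholders.

```go
package main

import (
	"fmt"

	druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register the druid v1alpha1 types with a fresh runtime scheme.
	scheme := runtime.NewScheme()
	if err := druidv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// Build a minimal Etcd object using the types from etcd_types.go.
	etcd := &druidv1alpha1.Etcd{
		ObjectMeta: metav1.ObjectMeta{Name: "etcd-main", Namespace: "demo"}, // placeholder names
		Spec: druidv1alpha1.EtcdSpec{
			Labels:   map[string]string{"app": "etcd"},
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "etcd"}},
			Replicas: 1,
		},
	}

	// The scheme now recognizes the Etcd kind of the druid group/version.
	fmt.Println(etcd.Name, scheme.Recognizes(druidv1alpha1.GroupVersion.WithKind("Etcd")))
}
```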
diff --git a/vendor/github.com/gardener/etcd-druid/api/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/etcd-druid/api/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..b8f363e
--- /dev/null
+++ b/vendor/github.com/gardener/etcd-druid/api/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,404 @@
+// +build !ignore_autogenerated
+
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
+ *out = *in
+ if in.Port != nil {
+ in, out := &in.Port, &out.Port
+ *out = new(int)
+ **out = **in
+ }
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = new(TLSConfig)
+ **out = **in
+ }
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(string)
+ **out = **in
+ }
+ if in.Store != nil {
+ in, out := &in.Store, &out.Store
+ *out = new(StoreSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = new(v1.ResourceRequirements)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FullSnapshotSchedule != nil {
+ in, out := &in.FullSnapshotSchedule, &out.FullSnapshotSchedule
+ *out = new(string)
+ **out = **in
+ }
+ if in.GarbageCollectionPolicy != nil {
+ in, out := &in.GarbageCollectionPolicy, &out.GarbageCollectionPolicy
+ *out = new(GarbageCollectionPolicy)
+ **out = **in
+ }
+ if in.GarbageCollectionPeriod != nil {
+ in, out := &in.GarbageCollectionPeriod, &out.GarbageCollectionPeriod
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.DeltaSnapshotPeriod != nil {
+ in, out := &in.DeltaSnapshotPeriod, &out.DeltaSnapshotPeriod
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.DeltaSnapshotMemoryLimit != nil {
+ in, out := &in.DeltaSnapshotMemoryLimit, &out.DeltaSnapshotMemoryLimit
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec.
+func (in *BackupSpec) DeepCopy() *BackupSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Condition) DeepCopyInto(out *Condition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
+func (in *Condition) DeepCopy() *Condition {
+ if in == nil {
+ return nil
+ }
+ out := new(Condition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference.
+func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference {
+ if in == nil {
+ return nil
+ }
+ out := new(CrossVersionObjectReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Etcd) DeepCopyInto(out *Etcd) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd.
+func (in *Etcd) DeepCopy() *Etcd {
+ if in == nil {
+ return nil
+ }
+ out := new(Etcd)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Etcd) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdConfig) DeepCopyInto(out *EtcdConfig) {
+ *out = *in
+ if in.Quota != nil {
+ in, out := &in.Quota, &out.Quota
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.DefragmentationSchedule != nil {
+ in, out := &in.DefragmentationSchedule, &out.DefragmentationSchedule
+ *out = new(string)
+ **out = **in
+ }
+ if in.ServerPort != nil {
+ in, out := &in.ServerPort, &out.ServerPort
+ *out = new(int)
+ **out = **in
+ }
+ if in.ClientPort != nil {
+ in, out := &in.ClientPort, &out.ClientPort
+ *out = new(int)
+ **out = **in
+ }
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(string)
+ **out = **in
+ }
+ if in.AuthSecretRef != nil {
+ in, out := &in.AuthSecretRef, &out.AuthSecretRef
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = new(v1.ResourceRequirements)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = new(TLSConfig)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConfig.
+func (in *EtcdConfig) DeepCopy() *EtcdConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdList) DeepCopyInto(out *EtcdList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Etcd, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdList.
+func (in *EtcdList) DeepCopy() *EtcdList {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EtcdList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) {
+ *out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ in.Etcd.DeepCopyInto(&out.Etcd)
+ in.Backup.DeepCopyInto(&out.Backup)
+ if in.PriorityClassName != nil {
+ in, out := &in.PriorityClassName, &out.PriorityClassName
+ *out = new(string)
+ **out = **in
+ }
+ if in.StorageClass != nil {
+ in, out := &in.StorageClass, &out.StorageClass
+ *out = new(string)
+ **out = **in
+ }
+ if in.StorageCapacity != nil {
+ in, out := &in.StorageCapacity, &out.StorageCapacity
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.VolumeClaimTemplate != nil {
+ in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec.
+func (in *EtcdSpec) DeepCopy() *EtcdSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdStatus) DeepCopyInto(out *EtcdStatus) {
+ *out = *in
+ if in.ObservedGeneration != nil {
+ in, out := &in.ObservedGeneration, &out.ObservedGeneration
+ *out = new(int64)
+ **out = **in
+ }
+ out.Etcd = in.Etcd
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ServiceName != nil {
+ in, out := &in.ServiceName, &out.ServiceName
+ *out = new(string)
+ **out = **in
+ }
+ if in.LastError != nil {
+ in, out := &in.LastError, &out.LastError
+ *out = new(string)
+ **out = **in
+ }
+ if in.Ready != nil {
+ in, out := &in.Ready, &out.Ready
+ *out = new(bool)
+ **out = **in
+ }
+ if in.LabelSelector != nil {
+ in, out := &in.LabelSelector, &out.LabelSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStatus.
+func (in *EtcdStatus) DeepCopy() *EtcdStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LastOperation) DeepCopyInto(out *LastOperation) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation.
+func (in *LastOperation) DeepCopy() *LastOperation {
+ if in == nil {
+ return nil
+ }
+ out := new(LastOperation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StoreSpec) DeepCopyInto(out *StoreSpec) {
+ *out = *in
+ if in.Container != nil {
+ in, out := &in.Container, &out.Container
+ *out = new(string)
+ **out = **in
+ }
+ if in.Provider != nil {
+ in, out := &in.Provider, &out.Provider
+ *out = new(StorageProvider)
+ **out = **in
+ }
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreSpec.
+func (in *StoreSpec) DeepCopy() *StoreSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StoreSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
+ *out = *in
+ out.ServerTLSSecretRef = in.ServerTLSSecretRef
+ out.ClientTLSSecretRef = in.ClientTLSSecretRef
+ out.TLSCASecretRef = in.TLSCASecretRef
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
+func (in *TLSConfig) DeepCopy() *TLSConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(TLSConfig)
+ in.DeepCopyInto(out)
+ return out
+}
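
The generated DeepCopy helpers above return fully independent objects: embedded metadata, maps, slices and pointer fields are duplicated rather than shared. A small sketch, with a made-up object name and label:

```
package main

import (
	"fmt"

	druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"
)

func main() {
	original := &druidv1alpha1.Etcd{}
	original.Name = "etcd-main"                         // hypothetical name
	original.Labels = map[string]string{"role": "main"} // hypothetical label

	// DeepCopy duplicates the nested ObjectMeta, Spec and Status, so
	// mutating the clone leaves the original untouched.
	clone := original.DeepCopy()
	clone.Labels["role"] = "events"

	fmt.Println(original.Labels["role"]) // still "main"
}
```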
diff --git a/vendor/github.com/gardener/external-dns-management/LICENSE.md b/vendor/github.com/gardener/external-dns-management/LICENSE.md
new file mode 100644
index 0000000..f178789
--- /dev/null
+++ b/vendor/github.com/gardener/external-dns-management/LICENSE.md
@@ -0,0 +1,320 @@
+```
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+```
+
+## APIs
+
+This project may include APIs to SAP or third party products or services. The use of these APIs, products and services may be subject to additional agreements. In no event shall the application of the Apache Software License, v.2 to this project grant any rights in or to these APIs, products or services that would alter, expand, be inconsistent with, or supersede any terms of these additional agreements. API means application programming interfaces, as well as their respective specifications and implementing code that allows other software products to communicate with or call on SAP or third party products or services (for example, SAP Enterprise Services, BAPIs, Idocs, RFCs and ABAP calls or other user exits) and may be made available through SAP or third party products, SDKs, documentation or other media.
+
+## Subcomponents
+
+This project includes the following subcomponents that are subject to separate license terms.
+Your use of these subcomponents is subject to the separate license terms applicable to
+each subcomponent.
+
+Gardener Controller Manager Library
+Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
+Apache 2 license (https://github.com/gardener/controller-manager-library/blob/master/LICENSE.md)
+
+APIMachinery
+https://git.k8s.io/apimachinery
+Copyright 2017 The Kubernetes Authors
+Apache 2 license (https://git.k8s.io/apimachinery/LICENSE)
+
+Client-Go
+https://git.k8s.io/client-go
+Copyright 2017 The Kubernetes Authors
+Apache 2 license (https://git.k8s.io/client-go/LICENSE)
+
+Code-Generator
+https://git.k8s.io/code-generator
+Copyright 2017 The Kubernetes Authors
+Apache 2 license (https://git.k8s.io/code-generator/LICENSE)
+
+Cobra.
+https://github.com/spf13/cobra
+Copyright © 2013 Steve Francia
+Apache 2 license (https://github.com/spf13/cobra/blob/master/LICENSE.txt)
+
+Logrus.
+https://github.com/sirupsen/logrus
+Copyright (c) 2014 Simon Eskildsen
+MIT license (https://github.com/sirupsen/logrus/blob/master/LICENSE)
+
+Google DNS SDK
+https://github.com/googleapis/google-api-go-client
+Copyright (c) 2011 Google Inc. All rights reserved
+BSD 3-Clause "New" or "Revised" License (https://github.com/googleapis/google-api-go-client/blob/master/LICENSE)
+
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
+Apache 2 license (https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
+
+Some code fragments have been extracted from kubernetes-incubator/external-dns
+Copyright 2017 The Kubernetes Authors.
+Apache 2 license (https://github.com/kubernetes-incubator/external-dns/blob/master/LICENSE)
+
+------
+
+## MIT License
+
+ SPDX short identifier: MIT
+
+```
+ The MIT License (MIT)
+
+ Copyright (c)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+```
+
+## The 3-Clause BSD License
+
+ SPDX short identifier: BSD-3-Clause
+
+```
+ Copyright
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+```
diff --git a/vendor/github.com/gardener/external-dns-management/NOTICE.md b/vendor/github.com/gardener/external-dns-management/NOTICE.md
new file mode 100644
index 0000000..def0add
--- /dev/null
+++ b/vendor/github.com/gardener/external-dns-management/NOTICE.md
@@ -0,0 +1,2 @@
+## External DNS Management
+Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/register.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/register.go
new file mode 100644
index 0000000..a43fe39
--- /dev/null
+++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/register.go
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ *
+ */
+
+//go:generate bash ../../../vendor/github.com/gardener/controller-manager-library/hack/generate-crds
+//go:generate bash ../../../hack/generate-code
+// +kubebuilder:skip
+
+package dns
+
+const (
+ GroupName = "dns.gardener.cloud"
+)
diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsannotation.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsannotation.go
new file mode 100644
index 0000000..7d17d66
--- /dev/null
+++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsannotation.go
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ *
+ */
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type DNSAnnotationList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []DNSAnnotation `json:"items"`
+}
+
+// +kubebuilder:storageversion
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:scope=Namespaced,path=dnsannotations,shortName=dnsa,singular=dnsannotation
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name=RefGroup,JSONPath=".spec.resourceRef.apiVersion",type=string
+// +kubebuilder:printcolumn:name=RefKind,JSONPath=".spec.resourceRef.kind",type=string
+// +kubebuilder:printcolumn:name=RefName,JSONPath=".spec.resourceRef.name",type=string
+// +kubebuilder:printcolumn:name=RefNamespace,JSONPath=".spec.resourceRef.namespace",type=string
+// +kubebuilder:printcolumn:name=Active,JSONPath=".status.active",type=boolean
+// +kubebuilder:printcolumn:name=Age,JSONPath=".metadata.creationTimestamp",type=date
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type DNSAnnotation struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ Spec DNSAnnotationSpec `json:"spec"`
+ // +optional
+ Status DNSAnnotationStatus `json:"status,omitempty"`
+}
+
+type DNSAnnotationSpec struct {
+ ResourceRef ResourceReference `json:"resourceRef"`
+ Annotations map[string]string `json:"annotations"`
+}
+
+type ResourceReference struct {
+ // API Version of the annotated object
+ APIVersion string `json:"apiVersion"`
+ // Kind of the annotated object
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind string `json:"kind"`
+ // Name of the annotated object
+ // +optional
+ Name string `json:"name,omitempty"`
+	// Namespace of the annotated object
+ // Defaulted by the namespace of the containing resource.
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+}
+
+type DNSAnnotationStatus struct {
+	// Indicates that the annotation is observed by a DNS source controller
+ // +optional
+ Active bool `json:"active,omitempty"`
+ // In case of a configuration problem this field describes the reason
+ // +optional
+ Message string `json:"message,omitempty"`
+}
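
A short sketch of constructing a DNSAnnotation in Go to attach DNS-related annotations to an existing Service; the helper, names and annotation keys are assumptions for illustration, not prescribed by this package.

```
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	dnsv1alpha1 "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1"
)

// newDNSAnnotation is a hypothetical helper: it points a DNSAnnotation at an
// existing Service and carries the annotations the DNS controller should see.
func newDNSAnnotation(namespace, serviceName string) *dnsv1alpha1.DNSAnnotation {
	return &dnsv1alpha1.DNSAnnotation{
		ObjectMeta: metav1.ObjectMeta{
			Name:      serviceName + "-dns",
			Namespace: namespace,
		},
		Spec: dnsv1alpha1.DNSAnnotationSpec{
			ResourceRef: dnsv1alpha1.ResourceReference{
				APIVersion: "v1",
				Kind:       "Service",
				Name:       serviceName,
				Namespace:  namespace,
			},
			// Annotation keys below are assumptions for illustration only.
			Annotations: map[string]string{
				"dns.gardener.cloud/dnsnames": "app.example.com",
				"dns.gardener.cloud/ttl":      "120",
			},
		},
	}
}

func main() {
	a := newDNSAnnotation("default", "my-service")
	fmt.Println(a.Spec.ResourceRef.Kind, "->", a.Name)
}
```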
diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsentry.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsentry.go
new file mode 100644
index 0000000..7aaf466
--- /dev/null
+++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsentry.go
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ *
+ */
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type DNSEntryList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []DNSEntry `json:"items"`
+}
+
+// +kubebuilder:storageversion
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:scope=Namespaced,path=dnsentries,shortName=dnse,singular=dnsentry
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name=DNS,description="FQDN of DNS Entry",JSONPath=".spec.dnsName",type=string
+// +kubebuilder:printcolumn:name=OWNERID,JSONPath=".spec.ownerId",type=string
+// +kubebuilder:printcolumn:name=TYPE,JSONPath=".status.providerType",type=string
+// +kubebuilder:printcolumn:name=PROVIDER,JSONPath=".status.provider",type=string
+// +kubebuilder:printcolumn:name=STATUS,JSONPath=".status.state",type=string
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type DNSEntry struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ Spec DNSEntrySpec `json:"spec"`
+ // +optional
+ Status DNSEntryStatus `json:"status,omitempty"`
+}
+
+type DNSEntrySpec struct {
+	// fully qualified domain name
+ DNSName string `json:"dnsName"`
+ // reference to base entry used to inherit attributes from
+ // +optional
+ Reference *EntryReference `json:"reference,omitempty"`
+ // owner id used to tag entries in external DNS system
+ // +optional
+ OwnerId *string `json:"ownerId,omitempty"`
+ // time to live for records in external DNS system
+ // +optional
+ TTL *int64 `json:"ttl,omitempty"`
+ // lookup interval for CNAMEs that must be resolved to IP addresses
+ // +optional
+ CNameLookupInterval *int64 `json:"cnameLookupInterval,omitempty"`
+ // text records, either text or targets must be specified
+ // +optional
+ Text []string `json:"text,omitempty"`
+ // target records (CNAME or A records), either text or targets must be specified
+ // +optional
+ Targets []string `json:"targets,omitempty"`
+}
+
+type DNSEntryStatus struct {
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+ // entry state
+ // +optional
+ State string `json:"state"`
+ // message describing the reason for the state
+ // +optional
+ Message *string `json:"message,omitempty"`
+ // provider type used for the entry
+ // +optional
+ ProviderType *string `json:"providerType,omitempty"`
+ // assigned provider
+ // +optional
+ Provider *string `json:"provider,omitempty"`
+ // zone used for the entry
+ // +optional
+ Zone *string `json:"zone,omitempty"`
+ // time to live used for the entry
+ // +optional
+ TTL *int64 `json:"ttl,omitempty"`
+ // effective targets generated for the entry
+ // +optional
+ Targets []string `json:"targets,omitempty"`
+}
+
+type EntryReference struct {
+ // name of the referenced DNSEntry object
+ Name string `json:"name"`
+ // namespace of the referenced DNSEntry object
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+}
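
A minimal sketch of a typed DNSEntry for a single A record, assuming the desired DNS name and target address are already known; only fields defined in the spec above are used.

```
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	dnsv1alpha1 "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1"
)

func main() {
	ttl := int64(120)
	entry := &dnsv1alpha1.DNSEntry{
		ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "default"},
		Spec: dnsv1alpha1.DNSEntrySpec{
			// Fully qualified name the responsible provider should serve.
			DNSName: "app.example.com",
			TTL:     &ttl,
			// Either Targets or Text must be set; here a single A record target.
			Targets: []string{"198.51.100.10"},
		},
	}
	fmt.Println(entry.Spec.DNSName, entry.Spec.Targets)
}
```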
diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsowner.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsowner.go
new file mode 100644
index 0000000..0c23b33
--- /dev/null
+++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsowner.go
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ *
+ */
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type DNSOwnerList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []DNSOwner `json:"items"`
+}
+
+// +kubebuilder:storageversion
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:scope=Cluster,path=dnsowners,shortName=dnso,singular=dnsowner
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name=OwnerId,JSONPath=".spec.ownerId",type=string
+// +kubebuilder:printcolumn:name=Active,JSONPath=".spec.active",type=boolean
+// +kubebuilder:printcolumn:name=Usages,JSONPath=".status.amount",type=string
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type DNSOwner struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ Spec DNSOwnerSpec `json:"spec"`
+ // +optional
+ Status DNSOwnerStatus `json:"status,omitempty"`
+}
+
+type DNSOwnerSpec struct {
+ // owner id used to tag entries in external DNS system
+ OwnerId string `json:"ownerId"`
+	// state of the owner id for the DNS controller observing entries that use this owner id
+ // (default:true)
+ // +optional
+ Active *bool `json:"active,omitempty"`
+}
+
+type DNSOwnerStatus struct {
+ // Entry statistic for this owner id
+ // +optional
+ Entries DNSOwnerStatusEntries `json:"entries,omitempty"`
+}
+
+type DNSOwnerStatusEntries struct {
+ // number of entries using this owner id
+ // +optional
+ Amount int `json:"amount"`
+ // number of entries per provider type
+ // +optional
+ ByType map[string]int `json:"types,omitempty"`
+}
diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsprovider.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsprovider.go
new file mode 100644
index 0000000..793890c
--- /dev/null
+++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsprovider.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ *
+ */
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type DNSProviderList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []DNSProvider `json:"items"`
+}
+
+// +kubebuilder:storageversion
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:scope=Namespaced,path=dnsproviders,shortName=dnspr,singular=dnsprovider
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name=TYPE,JSONPath=".spec.type",type=string
+// +kubebuilder:printcolumn:name=STATUS,JSONPath=".status.state",type=string
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type DNSProvider struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ Spec DNSProviderSpec `json:"spec"`
+ // +optional
+ Status DNSProviderStatus `json:"status,omitempty"`
+}
+
+type DNSProviderSpec struct {
+ // type of the provider (selecting the responsible type of DNS controller)
+ Type string `json:"type,omitempty"`
+ // optional additional provider specific configuration values
+ // +kubebuilder:validation:XPreserveUnknownFields
+ // +kubebuilder:pruning:PreserveUnknownFields
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty"`
+ // access credential for the external DNS system of the given type
+ SecretRef *corev1.SecretReference `json:"secretRef,omitempty"`
+ // desired selection of usable domains
+ // (by default all zones and domains in those zones will be served)
+ // +optional
+ Domains *DNSSelection `json:"domains,omitempty"`
+	// desired selection of usable zones
+	// the zone selection applies to the served zones only
+ // (by default all zones will be served)
+ // +optional
+ Zones *DNSSelection `json:"zones,omitempty"`
+}
+
+type DNSSelection struct {
+ // values that should be observed (domains or zones)
+	// +optional
+ Include []string `json:"include,omitempty"`
+ // values that should be ignored (domains or zones)
+	// +optional
+ Exclude []string `json:"exclude,omitempty"`
+}
+
+type DNSProviderStatus struct {
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+ // state of the provider
+ // +optional
+ State string `json:"state"`
+ // message describing the reason for the actual state of the provider
+ Message *string `json:"message,omitempty"`
+ // actually served domain selection
+ // +optional
+ Domains DNSSelectionStatus `json:"domains"`
+ // actually served zones
+ // +optional
+ Zones DNSSelectionStatus `json:"zones"`
+}
+
+type DNSSelectionStatus struct {
+ // included values (domains or zones)
+	// +optional
+ Included []string `json:"included,omitempty"`
+ // Excluded values (domains or zones)
+	// +optional
+ Excluded []string `json:"excluded,omitempty"`
+}
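
A sketch of a DNSProvider restricted to one domain via the DNSSelection include/exclude lists; the provider type string "aws-route53" and the Secret name are assumptions for illustration.

```
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	dnsv1alpha1 "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1"
)

func main() {
	provider := &dnsv1alpha1.DNSProvider{
		ObjectMeta: metav1.ObjectMeta{Name: "route53", Namespace: "default"},
		Spec: dnsv1alpha1.DNSProviderSpec{
			// Provider type and credentials Secret are assumed values.
			Type:      "aws-route53",
			SecretRef: &corev1.SecretReference{Name: "route53-credentials", Namespace: "default"},
			Domains: &dnsv1alpha1.DNSSelection{
				Include: []string{"example.com"},
				Exclude: []string{"internal.example.com"},
			},
		},
	}
	fmt.Println(provider.Spec.Type, provider.Spec.Domains.Include)
}
```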
diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/doc.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/doc.go
new file mode 100644
index 0000000..35b9da6
--- /dev/null
+++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/doc.go
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ *
+ */
+
+// +k8s:deepcopy-gen=package,register
+
+// Package v1alpha1 is the v1alpha1 version of the API.
+// +groupName=dns.gardener.cloud
+package v1alpha1
diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/register.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/register.go
new file mode 100644
index 0000000..1e9a627
--- /dev/null
+++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/register.go
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ *
+ */
+
+package v1alpha1
+
+import (
+ "github.com/gardener/external-dns-management/pkg/apis/dns"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+ Version = "v1alpha1"
+ GroupName = dns.GroupName
+
+ DNSOwnerKind = "DNSOwner"
+ DNSOwnerPlural = "dnsowners"
+
+ DNSProviderKind = "DNSProvider"
+ DNSProviderPlural = "dnsproviders"
+
+ DNSEntryKind = "DNSEntry"
+ DNSEntryPlural = "dnsentries"
+
+ DNSAnnotationKind = "DNSAnnotation"
+ DNSAnnotationPlural = "dnsannotations"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: dns.GroupName, Version: Version}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resources and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &DNSOwner{},
+ &DNSOwnerList{},
+ &DNSProvider{},
+ &DNSProviderList{},
+ &DNSEntry{},
+ &DNSEntryList{},
+ &DNSAnnotation{},
+ &DNSAnnotationList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
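
A sketch of how the registration above can feed a controller-runtime client to list entries and check their state, assuming controller-runtime (already vendored for the druid types) and cluster access via the usual kubeconfig lookup; error handling is trimmed to panics for brevity.

```
package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	dnsv1alpha1 "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := dnsv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// GetConfigOrDie resolves the kubeconfig or in-cluster configuration.
	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	entries := &dnsv1alpha1.DNSEntryList{}
	if err := c.List(context.TODO(), entries, client.InNamespace("default")); err != nil {
		panic(err)
	}
	for _, e := range entries.Items {
		// STATE_READY is one of the state constants defined in state.go below.
		fmt.Println(e.Spec.DNSName, e.Status.State == dnsv1alpha1.STATE_READY)
	}
}
```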
diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/state.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/state.go
new file mode 100644
index 0000000..f23b731
--- /dev/null
+++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/state.go
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ *
+ */
+
+package v1alpha1
+
+const STATE_PENDING = "Pending"
+const STATE_ERROR = "Error"
+const STATE_INVALID = "Invalid"
+const STATE_STALE = "Stale"
+const STATE_READY = "Ready"
+const STATE_DELETING = "Deleting"
diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..aa2c0ee
--- /dev/null
+++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,606 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSAnnotation) DeepCopyInto(out *DNSAnnotation) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSAnnotation.
+func (in *DNSAnnotation) DeepCopy() *DNSAnnotation {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSAnnotation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSAnnotation) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSAnnotationList) DeepCopyInto(out *DNSAnnotationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DNSAnnotation, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSAnnotationList.
+func (in *DNSAnnotationList) DeepCopy() *DNSAnnotationList {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSAnnotationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSAnnotationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSAnnotationSpec) DeepCopyInto(out *DNSAnnotationSpec) {
+ *out = *in
+ out.ResourceRef = in.ResourceRef
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSAnnotationSpec.
+func (in *DNSAnnotationSpec) DeepCopy() *DNSAnnotationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSAnnotationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSAnnotationStatus) DeepCopyInto(out *DNSAnnotationStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSAnnotationStatus.
+func (in *DNSAnnotationStatus) DeepCopy() *DNSAnnotationStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSAnnotationStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSEntry) DeepCopyInto(out *DNSEntry) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEntry.
+func (in *DNSEntry) DeepCopy() *DNSEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSEntry) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSEntryList) DeepCopyInto(out *DNSEntryList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DNSEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEntryList.
+func (in *DNSEntryList) DeepCopy() *DNSEntryList {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSEntryList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSEntryList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSEntrySpec) DeepCopyInto(out *DNSEntrySpec) {
+ *out = *in
+ if in.Reference != nil {
+ in, out := &in.Reference, &out.Reference
+ *out = new(EntryReference)
+ **out = **in
+ }
+ if in.OwnerId != nil {
+ in, out := &in.OwnerId, &out.OwnerId
+ *out = new(string)
+ **out = **in
+ }
+ if in.TTL != nil {
+ in, out := &in.TTL, &out.TTL
+ *out = new(int64)
+ **out = **in
+ }
+ if in.CNameLookupInterval != nil {
+ in, out := &in.CNameLookupInterval, &out.CNameLookupInterval
+ *out = new(int64)
+ **out = **in
+ }
+ if in.Text != nil {
+ in, out := &in.Text, &out.Text
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Targets != nil {
+ in, out := &in.Targets, &out.Targets
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEntrySpec.
+func (in *DNSEntrySpec) DeepCopy() *DNSEntrySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSEntrySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSEntryStatus) DeepCopyInto(out *DNSEntryStatus) {
+ *out = *in
+ if in.Message != nil {
+ in, out := &in.Message, &out.Message
+ *out = new(string)
+ **out = **in
+ }
+ if in.ProviderType != nil {
+ in, out := &in.ProviderType, &out.ProviderType
+ *out = new(string)
+ **out = **in
+ }
+ if in.Provider != nil {
+ in, out := &in.Provider, &out.Provider
+ *out = new(string)
+ **out = **in
+ }
+ if in.Zone != nil {
+ in, out := &in.Zone, &out.Zone
+ *out = new(string)
+ **out = **in
+ }
+ if in.TTL != nil {
+ in, out := &in.TTL, &out.TTL
+ *out = new(int64)
+ **out = **in
+ }
+ if in.Targets != nil {
+ in, out := &in.Targets, &out.Targets
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEntryStatus.
+func (in *DNSEntryStatus) DeepCopy() *DNSEntryStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSEntryStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSOwner) DeepCopyInto(out *DNSOwner) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOwner.
+func (in *DNSOwner) DeepCopy() *DNSOwner {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSOwner)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSOwner) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSOwnerList) DeepCopyInto(out *DNSOwnerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DNSOwner, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOwnerList.
+func (in *DNSOwnerList) DeepCopy() *DNSOwnerList {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSOwnerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSOwnerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSOwnerSpec) DeepCopyInto(out *DNSOwnerSpec) {
+ *out = *in
+ if in.Active != nil {
+ in, out := &in.Active, &out.Active
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOwnerSpec.
+func (in *DNSOwnerSpec) DeepCopy() *DNSOwnerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSOwnerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSOwnerStatus) DeepCopyInto(out *DNSOwnerStatus) {
+ *out = *in
+ in.Entries.DeepCopyInto(&out.Entries)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOwnerStatus.
+func (in *DNSOwnerStatus) DeepCopy() *DNSOwnerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSOwnerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSOwnerStatusEntries) DeepCopyInto(out *DNSOwnerStatusEntries) {
+ *out = *in
+ if in.ByType != nil {
+ in, out := &in.ByType, &out.ByType
+ *out = make(map[string]int, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOwnerStatusEntries.
+func (in *DNSOwnerStatusEntries) DeepCopy() *DNSOwnerStatusEntries {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSOwnerStatusEntries)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSProvider) DeepCopyInto(out *DNSProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProvider.
+func (in *DNSProvider) DeepCopy() *DNSProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSProviderList) DeepCopyInto(out *DNSProviderList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DNSProvider, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProviderList.
+func (in *DNSProviderList) DeepCopy() *DNSProviderList {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSProviderList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSProviderList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSProviderSpec) DeepCopyInto(out *DNSProviderSpec) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ if in.Domains != nil {
+ in, out := &in.Domains, &out.Domains
+ *out = new(DNSSelection)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = new(DNSSelection)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProviderSpec.
+func (in *DNSProviderSpec) DeepCopy() *DNSProviderSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSProviderSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSProviderStatus) DeepCopyInto(out *DNSProviderStatus) {
+ *out = *in
+ if in.Message != nil {
+ in, out := &in.Message, &out.Message
+ *out = new(string)
+ **out = **in
+ }
+ in.Domains.DeepCopyInto(&out.Domains)
+ in.Zones.DeepCopyInto(&out.Zones)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProviderStatus.
+func (in *DNSProviderStatus) DeepCopy() *DNSProviderStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSProviderStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSSelection) DeepCopyInto(out *DNSSelection) {
+ *out = *in
+ if in.Include != nil {
+ in, out := &in.Include, &out.Include
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Exclude != nil {
+ in, out := &in.Exclude, &out.Exclude
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSelection.
+func (in *DNSSelection) DeepCopy() *DNSSelection {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSSelection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSSelectionStatus) DeepCopyInto(out *DNSSelectionStatus) {
+ *out = *in
+ if in.Included != nil {
+ in, out := &in.Included, &out.Included
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Excluded != nil {
+ in, out := &in.Excluded, &out.Excluded
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSelectionStatus.
+func (in *DNSSelectionStatus) DeepCopy() *DNSSelectionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSSelectionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EntryReference) DeepCopyInto(out *EntryReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EntryReference.
+func (in *EntryReference) DeepCopy() *EntryReference {
+ if in == nil {
+ return nil
+ }
+ out := new(EntryReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceReference) DeepCopyInto(out *ResourceReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceReference.
+func (in *ResourceReference) DeepCopy() *ResourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
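
The generated `DeepCopyInto`/`DeepCopy` helpers above matter because the DNS types carry pointer and slice fields (`TTL`, `Targets`, ...), so copying the struct by value would alias them. Below is a minimal sketch of the intended usage; the import path is not visible in this hunk, so `github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1` is an assumption.

```go
package main

import (
	"fmt"

	dnsv1alpha1 "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1" // assumed vendored path
)

func main() {
	ttl := int64(120)
	entry := &dnsv1alpha1.DNSEntry{
		Spec: dnsv1alpha1.DNSEntrySpec{
			TTL:     &ttl,                // pointer field, duplicated by DeepCopyInto
			Targets: []string{"1.2.3.4"}, // slice field, duplicated as well
		},
	}

	// Mutating the copy must not leak back into the original object,
	// e.g. when modifying an object obtained from a shared cache.
	dup := entry.DeepCopy()
	dup.Spec.Targets = append(dup.Spec.Targets, "5.6.7.8")
	*dup.Spec.TTL = 300

	fmt.Println(entry.Spec.Targets, *entry.Spec.TTL) // [1.2.3.4] 120
	fmt.Println(dup.Spec.Targets, *dup.Spec.TTL)     // [1.2.3.4 5.6.7.8] 300
}
```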
diff --git a/vendor/github.com/gardener/gardener-resource-manager/LICENSE.md b/vendor/github.com/gardener/gardener-resource-manager/LICENSE.md
new file mode 100644
index 0000000..065c127
--- /dev/null
+++ b/vendor/github.com/gardener/gardener-resource-manager/LICENSE.md
@@ -0,0 +1,288 @@
+```
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+```
+
+## APIs
+
+This project may include APIs to SAP or third party products or services. The use of these APIs, products and services may be subject to additional agreements. In no event shall the application of the Apache Software License, v.2 to this project grant any rights in or to these APIs, products or services that would alter, expand, be inconsistent with, or supersede any terms of these additional agreements. API means application programming interfaces, as well as their respective specifications and implementing code that allows other software products to communicate with or call on SAP or third party products or services (for example, SAP Enterprise Services, BAPIs, Idocs, RFCs and ABAP calls or other user exits) and may be made available through SAP or third party products, SDKs, documentation or other media.
+
+## Subcomponents
+
+This project includes the following subcomponents that are subject to separate license terms.
+Your use of these subcomponents is subject to the separate license terms applicable to
+each subcomponent.
+
+Gardener.
+https://github.com/gardener/gardener.
+Copyright (c) 2019 SAP SE or an SAP affiliate company.
+Apache 2 license (https://github.com/gardener/gardener/blob/master/LICENSE.md).
+
+controller-runtime.
+https://sigs.k8s.io/controller-runtime.
+Copyright 2019 The Kubernetes Authors.
+Apache 2 license (https://sigs.k8s.io/controller-runtime/LICENSE).
+
+API.
+https://git.k8s.io/api.
+Copyright 2019 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/api/LICENSE).
+
+APIMachinery.
+https://git.k8s.io/apimachinery.
+Copyright 2019 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/apimachinery/LICENSE).
+
+Client-Go.
+https://git.k8s.io/client-go.
+Copyright 2017 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/client-go/LICENSE).
+
+YAML marshaling and unmarshalling support for Go.
+gopkg.in/yaml.v2.
+Copyright 2011-2016 Canonical Ltd.
+Apache 2 license (https://github.com/go-yaml/yaml/blob/v2/LICENSE)
+
+Packr.
+https://github.com/gobuffalo/packr
+Copyright (c) 2016 Mark Bates.
+MIT license (https://github.com/gobuffalo/packr/blob/master/LICENSE.txt)
+
+Cobra.
+https://github.com/spf13/cobra
+Copyright 2019 Steve Francia.
+Apache 2 license (https://github.com/spf13/cobra/blob/master/LICENSE.txt)
+
+Ginkgo.
+https://github.com/onsi/ginkgo.
+Copyright (c) 2013-2014 Onsi Fakhouri.
+MIT license (https://github.com/onsi/ginkgo/blob/master/LICENSE)
+
+Gomega.
+github.com/onsi/gomega.
+Copyright (c) 2013-2014 Onsi Fakhouri.
+MIT license (https://github.com/onsi/gomega/blob/master/LICENSE)
+
+------
+## MIT License
+
+ The MIT License (MIT)
+
+ Copyright (c)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
diff --git a/vendor/github.com/gardener/gardener-resource-manager/NOTICE.md b/vendor/github.com/gardener/gardener-resource-manager/NOTICE.md
new file mode 100644
index 0000000..6e3aae0
--- /dev/null
+++ b/vendor/github.com/gardener/gardener-resource-manager/NOTICE.md
@@ -0,0 +1,15 @@
+## Gardener Resource Manager
+Copyright (c) 2017-2019 SAP SE or an SAP affiliate company. All rights reserved.
+
+## Seed Source
+
+The source code of this component was seeded based on a copy of the following files from [github.com/kubernetes-sigs](https://github.com/kubernetes-sigs):
+
+controller-runtime.
+https://sigs.k8s.io/controller-runtime.
+Copyright 2018 The Kubernetes Authors.
+Apache 2 license (https://sigs.k8s.io/controller-runtime/LICENSE).
+
+Version: 0.1.9.
+Commit-ID: f6f0bc9611363b43664d08fb097ab13243ef621d
+Commit-Message: Merge pull request #263 from DirectXMan12/release/v0.1.9
diff --git a/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/register.go b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/register.go
new file mode 100644
index 0000000..112a3f1
--- /dev/null
+++ b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/register.go
@@ -0,0 +1,18 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resources
+
+// GroupName is the group name used in this package
+const GroupName = "resources.gardener.cloud"
diff --git a/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/doc.go b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/doc.go
new file mode 100644
index 0000000..19823fc
--- /dev/null
+++ b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=github.com/gardener/gardener-resource-manager/pkg/apis/resources
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+//go:generate ../../../../hack/update-codegen.sh
+
+// Package v1alpha1 contains the configuration of the Gardener Resource Manager.
+// +groupName=resources.gardener.cloud
+package v1alpha1 // import "github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1"
diff --git a/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/register.go b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/register.go
new file mode 100644
index 0000000..ec3097f
--- /dev/null
+++ b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/register.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ resources "github.com/gardener/gardener-resource-manager/pkg/apis/resources"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: resources.GroupName, Version: "v1alpha1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &ManagedResource{},
+ &ManagedResourceList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
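
For context, `AddToScheme` above is what a consumer calls to make `ManagedResource` and `ManagedResourceList` known to a `runtime.Scheme` before building a controller-runtime client. A minimal sketch, assuming the caller already has a `*rest.Config` (the function name and wiring are illustrative, not part of this patch):

```go
package example

import (
	resourcesv1alpha1 "github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newSeedClient builds a client that can read and write ManagedResources
// in addition to the built-in Kubernetes types.
func newSeedClient(cfg *rest.Config) (client.Client, error) {
	scheme := runtime.NewScheme()
	if err := clientgoscheme.AddToScheme(scheme); err != nil { // built-in Kubernetes types
		return nil, err
	}
	if err := resourcesv1alpha1.AddToScheme(scheme); err != nil { // resources.gardener.cloud/v1alpha1
		return nil, err
	}
	return client.New(cfg, client.Options{Scheme: scheme})
}
```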
diff --git a/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/types.go b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/types.go
new file mode 100644
index 0000000..ca431ee
--- /dev/null
+++ b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/types.go
@@ -0,0 +1,170 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ // Ignore is an annotation that dictates whether a resource should be ignored during
+ // reconciliation.
+ Ignore = "resources.gardener.cloud/ignore"
+ // DeleteOnInvalidUpdate is a constant for an annotation on a resource managed by a ManagedResource. If set to
+ // true then the controller will delete the object in case it faces an "Invalid" response during an update operation.
+ DeleteOnInvalidUpdate = "resources.gardener.cloud/delete-on-invalid-update"
+ // KeepObject is a constant for an annotation on a resource managed by a ManagedResource. If set to
+ // true then the controller will not delete the object in case it is removed from the ManagedResource or the
+ // ManagedResource itself is deleted.
+ KeepObject = "resources.gardener.cloud/keep-object"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ManagedResource describes a list of managed resources.
+type ManagedResource struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // Spec contains the specification of this managed resource.
+ Spec ManagedResourceSpec `json:"spec,omitempty"`
+ // Status contains the status of this managed resource.
+ Status ManagedResourceStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ManagedResourceList is a list of ManagedResource resources.
+type ManagedResourceList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of ManagedResource.
+ Items []ManagedResource `json:"items"`
+}
+
+// ManagedResourceSpec contains the specification of the resources managed by a ManagedResource.
+type ManagedResourceSpec struct {
+ // Class holds the resource class used to control the responsibility for multiple resource manager instances
+ // +optional
+ Class *string `json:"class,omitempty"`
+ // SecretRefs is a list of secret references.
+ SecretRefs []corev1.LocalObjectReference `json:"secretRefs"`
+ // InjectLabels injects the provided labels into every resource that is part of the referenced secrets.
+ // +optional
+ InjectLabels map[string]string `json:"injectLabels,omitempty"`
+ // ForceOverwriteLabels specifies that all existing labels should be overwritten. Defaults to false.
+ // +optional
+ ForceOverwriteLabels *bool `json:"forceOverwriteLabels,omitempty"`
+ // ForceOverwriteAnnotations specifies that all existing annotations should be overwritten. Defaults to false.
+ // +optional
+ ForceOverwriteAnnotations *bool `json:"forceOverwriteAnnotations,omitempty"`
+ // KeepObjects specifies whether the objects should be kept although the managed resource has already been deleted.
+ // Defaults to false.
+ // +optional
+ KeepObjects *bool `json:"keepObjects,omitempty"`
+ // Equivalences specifies possible group/kind equivalences for objects.
+ // +optional
+ Equivalences [][]metav1.GroupKind `json:"equivalences,omitempty"`
+ // DeletePersistentVolumeClaims specifies if PersistentVolumeClaims created by StatefulSets, which are managed by this
+ // resource, should also be deleted when the corresponding StatefulSet is deleted (defaults to false).
+ // +optional
+ DeletePersistentVolumeClaims *bool `json:"deletePersistentVolumeClaims,omitempty"`
+}
+
+// ManagedResourceStatus is the status of a managed resource.
+type ManagedResourceStatus struct {
+ Conditions []ManagedResourceCondition `json:"conditions,omitempty"`
+ // ObservedGeneration is the most recent generation observed for this resource.
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+ // Resources is a list of objects that have been created.
+ // +optional
+ Resources []ObjectReference `json:"resources,omitempty"`
+}
+
+// ObjectReference is a reference to a managed object together with the labels and annotations used during its last update.
+type ObjectReference struct {
+ corev1.ObjectReference `json:",inline"`
+ // Labels is a map of labels that were used during last update of the resource.
+ Labels map[string]string `json:"labels,omitempty"`
+ // Annotations is a map of annotations that were used during last update of the resource.
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+// ConditionType is the type of a condition.
+type ConditionType string
+
+const (
+ // ResourcesApplied is a condition type that indicates whether all resources are applied to the target cluster.
+ ResourcesApplied ConditionType = "ResourcesApplied"
+ // ResourcesHealthy is a condition type that indicates whether all resources are present and healthy.
+ ResourcesHealthy ConditionType = "ResourcesHealthy"
+)
+
+// ConditionStatus is the status of a condition.
+type ConditionStatus string
+
+// These are valid condition statuses.
+const (
+ // ConditionTrue means a resource is in the condition.
+ ConditionTrue ConditionStatus = "True"
+ // ConditionFalse means a resource is not in the condition.
+ ConditionFalse ConditionStatus = "False"
+ // ConditionUnknown means that the controller can't decide if a resource is in the condition or not.
+ ConditionUnknown ConditionStatus = "Unknown"
+ // ConditionProgressing means that the controller is currently acting on the resource and the condition is therefore progressing.
+ ConditionProgressing ConditionStatus = "Progressing"
+)
+
+// These are well-known reasons for ManagedResourceConditions.
+const (
+ // ConditionApplySucceeded indicates that the `ResourcesApplied` condition is `True`,
+ // because all resources have been applied successfully.
+ ConditionApplySucceeded = "ApplySucceeded"
+ // ConditionApplyFailed indicates that the `ResourcesApplied` condition is `False`,
+ // because applying the resources failed.
+ ConditionApplyFailed = "ApplyFailed"
+ // ConditionDecodingFailed indicates that the `ResourcesApplied` condition is `False`,
+ // because decoding the resources of the ManagedResource failed.
+ ConditionDecodingFailed = "DecodingFailed"
+ // ConditionApplyProgressing indicates that the `ResourcesApplied` condition is `Progressing`,
+ // because the resources are currently being reconciled.
+ ConditionApplyProgressing = "ApplyProgressing"
+ // ConditionDeletionFailed indicates that the `ResourcesApplied` condition is `False`,
+ // because deleting the resources failed.
+ ConditionDeletionFailed = "DeletionFailed"
+ // ConditionDeletionPending indicates that the `ResourcesApplied` condition is `Progressing`,
+ // because the deletion of some resources is still pending.
+ ConditionDeletionPending = "DeletionPending"
+ // ConditionHealthChecksPending indicates that the `ResourcesHealthy` condition is `Unknown`,
+ // because the health checks have not been completely executed yet for the current set of resources.
+ ConditionHealthChecksPending = "HealthChecksPending"
+)
+
+// ManagedResourceCondition describes the state of a ManagedResource at a certain point in time.
+type ManagedResourceCondition struct {
+ // Type of the ManagedResource condition.
+ Type ConditionType `json:"type"`
+ // Status of the ManagedResource condition.
+ Status ConditionStatus `json:"status"`
+ // Last time the condition was updated.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime"`
+ // The reason for the condition's last transition.
+ Reason string `json:"reason"`
+ // A human readable message indicating details about the transition.
+ Message string `json:"message"`
+}
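
To make the shape of the API above concrete, here is a hedged sketch of constructing a `ManagedResource` in Go; the name, namespace, secret name, class, and labels are purely illustrative, and the manifests themselves are assumed to live in the data of the referenced secret.

```go
package example

import (
	resourcesv1alpha1 "github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleManagedResource builds a ManagedResource that points at a secret
// containing the manifests to be applied by gardener-resource-manager.
func exampleManagedResource() *resourcesv1alpha1.ManagedResource {
	var (
		class       = "seed" // hypothetical resource class
		keepObjects = false
	)
	return &resourcesv1alpha1.ManagedResource{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "fleet-agent",     // hypothetical
			Namespace: "shoot--foo--bar", // hypothetical
		},
		Spec: resourcesv1alpha1.ManagedResourceSpec{
			Class:      &class,
			SecretRefs: []corev1.LocalObjectReference{{Name: "managedresource-fleet-agent"}},
			// InjectLabels is merged into every object rendered from the referenced secrets.
			InjectLabels: map[string]string{"origin": "fleet-agent"},
			KeepObjects:  &keepObjects,
		},
	}
}
```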
diff --git a/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..0ed4b79
--- /dev/null
+++ b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,231 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedResource) DeepCopyInto(out *ManagedResource) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResource.
+func (in *ManagedResource) DeepCopy() *ManagedResource {
+ if in == nil {
+ return nil
+ }
+ out := new(ManagedResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ManagedResource) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedResourceCondition) DeepCopyInto(out *ManagedResourceCondition) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourceCondition.
+func (in *ManagedResourceCondition) DeepCopy() *ManagedResourceCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(ManagedResourceCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedResourceList) DeepCopyInto(out *ManagedResourceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ManagedResource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourceList.
+func (in *ManagedResourceList) DeepCopy() *ManagedResourceList {
+ if in == nil {
+ return nil
+ }
+ out := new(ManagedResourceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ManagedResourceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedResourceSpec) DeepCopyInto(out *ManagedResourceSpec) {
+ *out = *in
+ if in.Class != nil {
+ in, out := &in.Class, &out.Class
+ *out = new(string)
+ **out = **in
+ }
+ if in.SecretRefs != nil {
+ in, out := &in.SecretRefs, &out.SecretRefs
+ *out = make([]v1.LocalObjectReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.InjectLabels != nil {
+ in, out := &in.InjectLabels, &out.InjectLabels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ForceOverwriteLabels != nil {
+ in, out := &in.ForceOverwriteLabels, &out.ForceOverwriteLabels
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ForceOverwriteAnnotations != nil {
+ in, out := &in.ForceOverwriteAnnotations, &out.ForceOverwriteAnnotations
+ *out = new(bool)
+ **out = **in
+ }
+ if in.KeepObjects != nil {
+ in, out := &in.KeepObjects, &out.KeepObjects
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Equivalences != nil {
+ in, out := &in.Equivalences, &out.Equivalences
+ *out = make([][]metav1.GroupKind, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = make([]metav1.GroupKind, len(*in))
+ copy(*out, *in)
+ }
+ }
+ }
+ if in.DeletePersistentVolumeClaims != nil {
+ in, out := &in.DeletePersistentVolumeClaims, &out.DeletePersistentVolumeClaims
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourceSpec.
+func (in *ManagedResourceSpec) DeepCopy() *ManagedResourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ManagedResourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedResourceStatus) DeepCopyInto(out *ManagedResourceStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]ManagedResourceCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]ObjectReference, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourceStatus.
+func (in *ManagedResourceStatus) DeepCopy() *ManagedResourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ManagedResourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
+ *out = *in
+ out.ObjectReference = in.ObjectReference
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
+func (in *ObjectReference) DeepCopy() *ObjectReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectReference)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/gardener/gardener-resource-manager/pkg/manager/managedresources.go b/vendor/github.com/gardener/gardener-resource-manager/pkg/manager/managedresources.go
new file mode 100644
index 0000000..90edf98
--- /dev/null
+++ b/vendor/github.com/gardener/gardener-resource-manager/pkg/manager/managedresources.go
@@ -0,0 +1,120 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package manager
+
+import (
+ "context"
+
+ resourcesv1alpha1 "github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1"
+
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+)
+
+type ManagedResource struct {
+ client client.Client
+ resource *resourcesv1alpha1.ManagedResource
+}
+
+func NewManagedResource(client client.Client) *ManagedResource {
+ return &ManagedResource{
+ client: client,
+ resource: &resourcesv1alpha1.ManagedResource{},
+ }
+}
+
+func (m *ManagedResource) WithNamespacedName(namespace, name string) *ManagedResource {
+ m.resource.Namespace = namespace
+ m.resource.Name = name
+ return m
+}
+
+func (m *ManagedResource) WithLabels(labels map[string]string) *ManagedResource {
+ m.resource.Labels = labels
+ return m
+}
+
+func (m *ManagedResource) WithAnnotations(annotations map[string]string) *ManagedResource {
+ m.resource.Annotations = annotations
+ return m
+}
+
+func (m *ManagedResource) WithClass(name string) *ManagedResource {
+ if name == "" {
+ m.resource.Spec.Class = nil
+ } else {
+ m.resource.Spec.Class = &name
+ }
+ return m
+}
+
+func (m *ManagedResource) WithSecretRef(secretRefName string) *ManagedResource {
+ m.resource.Spec.SecretRefs = append(m.resource.Spec.SecretRefs, corev1.LocalObjectReference{Name: secretRefName})
+ return m
+}
+
+func (m *ManagedResource) WithSecretRefs(secretRefs []corev1.LocalObjectReference) *ManagedResource {
+ m.resource.Spec.SecretRefs = append(m.resource.Spec.SecretRefs, secretRefs...)
+ return m
+}
+
+func (m *ManagedResource) WithInjectedLabels(labelsToInject map[string]string) *ManagedResource {
+ m.resource.Spec.InjectLabels = labelsToInject
+ return m
+}
+
+func (m *ManagedResource) ForceOverwriteAnnotations(v bool) *ManagedResource {
+ m.resource.Spec.ForceOverwriteAnnotations = &v
+ return m
+}
+
+func (m *ManagedResource) ForceOverwriteLabels(v bool) *ManagedResource {
+ m.resource.Spec.ForceOverwriteLabels = &v
+ return m
+}
+
+func (m *ManagedResource) KeepObjects(v bool) *ManagedResource {
+ m.resource.Spec.KeepObjects = &v
+ return m
+}
+
+func (m *ManagedResource) DeletePersistentVolumeClaims(v bool) *ManagedResource {
+ m.resource.Spec.DeletePersistentVolumeClaims = &v
+ return m
+}
+
+func (m *ManagedResource) Reconcile(ctx context.Context) error {
+ resource := &resourcesv1alpha1.ManagedResource{
+ ObjectMeta: metav1.ObjectMeta{Name: m.resource.Name, Namespace: m.resource.Namespace},
+ }
+
+ _, err := controllerutil.CreateOrUpdate(ctx, m.client, resource, func() error {
+ resource.Labels = m.resource.Labels
+ resource.Annotations = m.resource.Annotations
+ resource.Spec = m.resource.Spec
+ return nil
+ })
+ return err
+}
+
+func (m *ManagedResource) Delete(ctx context.Context) error {
+ if err := m.client.Delete(ctx, m.resource); err != nil && !apierrors.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
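
The builder above is meant to be chained and finished with `Reconcile`, which creates or updates the object idempotently via `controllerutil.CreateOrUpdate`. A short usage sketch; the namespace, resource, and secret names are hypothetical:

```go
package example

import (
	"context"

	"github.com/gardener/gardener-resource-manager/pkg/manager"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deployManagedResource creates or updates a ManagedResource that references an
// already existing secret containing the rendered manifests.
func deployManagedResource(ctx context.Context, c client.Client) error {
	return manager.
		NewManagedResource(c).
		WithNamespacedName("shoot--foo--bar", "fleet-agent").           // hypothetical
		WithSecretRef("managedresource-fleet-agent").                   // hypothetical
		WithInjectedLabels(map[string]string{"origin": "fleet-agent"}). // hypothetical
		KeepObjects(false).
		Reconcile(ctx)
}
```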
diff --git a/vendor/github.com/gardener/gardener-resource-manager/pkg/manager/managedsecrets.go b/vendor/github.com/gardener/gardener-resource-manager/pkg/manager/managedsecrets.go
new file mode 100644
index 0000000..d04f7cf
--- /dev/null
+++ b/vendor/github.com/gardener/gardener-resource-manager/pkg/manager/managedsecrets.go
@@ -0,0 +1,124 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package manager
+
+import (
+ "context"
+
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+)
+
+type Secret struct {
+ client client.Client
+
+ keyValues map[string]string
+ secret *corev1.Secret
+}
+
+func NewSecret(client client.Client) *Secret {
+ return &Secret{
+ client: client,
+ keyValues: make(map[string]string),
+ secret: &corev1.Secret{},
+ }
+}
+
+func (s *Secret) WithNamespacedName(namespace, name string) *Secret {
+ s.secret.Namespace = namespace
+ s.secret.Name = name
+ return s
+}
+
+func (s *Secret) WithLabels(labels map[string]string) *Secret {
+ s.secret.Labels = labels
+ return s
+}
+
+func (s *Secret) WithAnnotations(annotations map[string]string) *Secret {
+ s.secret.Annotations = annotations
+ return s
+}
+
+func (s *Secret) WithKeyValues(keyValues map[string][]byte) *Secret {
+ s.secret.Data = keyValues
+ return s
+}
+
+func (s *Secret) Reconcile(ctx context.Context) error {
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: s.secret.Name, Namespace: s.secret.Namespace},
+ }
+
+ _, err := controllerutil.CreateOrUpdate(ctx, s.client, secret, func() error {
+ secret.Labels = s.secret.Labels
+ secret.Annotations = s.secret.Annotations
+ secret.Type = corev1.SecretTypeOpaque
+ secret.Data = s.secret.Data
+ return nil
+ })
+ return err
+}
+
+func (s *Secret) Delete(ctx context.Context) error {
+ if err := s.client.Delete(ctx, s.secret); err != nil && !apierrors.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+type Secrets struct {
+ client client.Client
+
+ secrets []Secret
+}
+
+func NewSecrets(client client.Client) *Secrets {
+ return &Secrets{
+ client: client,
+ secrets: []Secret{},
+ }
+}
+
+func (s *Secrets) WithSecretList(secrets []Secret) *Secrets {
+ s.secrets = append(s.secrets, secrets...)
+ return s
+}
+
+func (s *Secrets) WithSecret(secrets Secret) *Secrets {
+ s.secrets = append(s.secrets, secrets)
+ return s
+}
+
+func (s *Secrets) Reconcile(ctx context.Context) error {
+ for _, secret := range s.secrets {
+ if err := secret.Reconcile(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *Secrets) Delete(ctx context.Context) error {
+ for _, secret := range s.secrets {
+ if err := secret.Delete(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
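
In practice the two builders in this package are used together: the `Secret` carries the rendered manifests, and the `ManagedResource` references it. A sketch, again with hypothetical names:

```go
package example

import (
	"context"

	"github.com/gardener/gardener-resource-manager/pkg/manager"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deploy reconciles the secret holding the manifests first and then the
// ManagedResource that instructs gardener-resource-manager to apply them.
func deploy(ctx context.Context, c client.Client, manifests []byte) error {
	const (
		namespace  = "shoot--foo--bar"              // hypothetical
		secretName = "managedresource-fleet-config" // hypothetical
	)

	if err := manager.
		NewSecret(c).
		WithNamespacedName(namespace, secretName).
		WithKeyValues(map[string][]byte{"config.yaml": manifests}).
		Reconcile(ctx); err != nil {
		return err
	}

	return manager.
		NewManagedResource(c).
		WithNamespacedName(namespace, "fleet-config"). // hypothetical
		WithSecretRef(secretName).
		Reconcile(ctx)
}
```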
diff --git a/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/bug.md b/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/bug.md
new file mode 100644
index 0000000..3be7585
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/bug.md
@@ -0,0 +1,38 @@
+---
+name: Bug Report
+about: Report a bug encountered while operating Gardener
+labels: kind/bug
+
+---
+
+**How to categorize this issue?**
+
+/area TODO
+/kind bug
+/priority normal
+
+**What happened**:
+
+**What you expected to happen**:
+
+**How to reproduce it (as minimally and precisely as possible)**:
+
+**Anything else we need to know?**:
+
+**Environment**:
+
+- Gardener version:
+- Kubernetes version (use `kubectl version`):
+- Cloud provider or hardware configuration:
+- Others:
diff --git a/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/doc.go b/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/doc.go
new file mode 100644
index 0000000..7d27222
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/doc.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This package exists only so that `go mod` treats the GitHub-related issue templates as dependencies and vendors them.
+package issue_template
diff --git a/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/feature.md b/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/feature.md
new file mode 100644
index 0000000..1dcd7a6
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/feature.md
@@ -0,0 +1,27 @@
+---
+name: Enhancement Request
+about: Suggest an enhancement to the Gardener project
+labels: kind/enhancement
+
+---
+
+**How to categorize this issue?**
+
+/area TODO
+/kind enhancement
+/priority normal
+
+**What would you like to be added**:
+
+**Why is this needed**:
diff --git a/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/flaking-test.md b/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/flaking-test.md
new file mode 100644
index 0000000..108e4a8
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/flaking-test.md
@@ -0,0 +1,35 @@
+---
+name: Flaking Test
+about: Report flaky tests or jobs in Gardener CI
+title: "[Flaky Test] FLAKING TEST/SUITE"
+labels: kind/flake
+
+---
+
+
+
+**How to categorize this issue?**
+
+/area testing
+/kind flake
+/priority normal
+
+**Which test(s)/suite(s) are flaking**:
+
+**CI link**:
+
+**Reason for failure**:
+
+**Anything else we need to know**:
+
diff --git a/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/support.md b/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/support.md
new file mode 100644
index 0000000..e646f03
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/.github/ISSUE_TEMPLATE/support.md
@@ -0,0 +1,14 @@
+---
+name: Support Request
+about: Support request or question relating to Gardener
+labels: kind/question
+
+---
+
+
diff --git a/vendor/github.com/gardener/gardener/.github/doc.go b/vendor/github.com/gardener/gardener/.github/doc.go
new file mode 100644
index 0000000..8b24383
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/.github/doc.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This package imports GitHub-related templates; it exists to force `go mod` to treat them as dependencies.
+package github
diff --git a/vendor/github.com/gardener/gardener/.github/pull_request_template.md b/vendor/github.com/gardener/gardener/.github/pull_request_template.md
new file mode 100644
index 0000000..66a8b1b
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/.github/pull_request_template.md
@@ -0,0 +1,40 @@
+**How to categorize this PR?**
+
+/area TODO
+/kind TODO
+/priority normal
+
+**What this PR does / why we need it**:
+
+**Which issue(s) this PR fixes**:
+Fixes #
+
+**Special notes for your reviewer**:
+
+**Release note**:
+
+```other operator
+
+```
diff --git a/vendor/github.com/gardener/gardener/LICENSE.md b/vendor/github.com/gardener/gardener/LICENSE.md
new file mode 100644
index 0000000..1a9f9ce
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/LICENSE.md
@@ -0,0 +1,714 @@
+```
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+```
+
+## APIs
+
+This project may include APIs to SAP or third party products or services. The use of these APIs, products and services may be subject to additional agreements. In no event shall the application of the Apache Software License, v.2 to this project grant any rights in or to these APIs, products or services that would alter, expand, be inconsistent with, or supersede any terms of these additional agreements. API means application programming interfaces, as well as their respective specifications and implementing code that allows other software products to communicate with or call on SAP or third party products or services (for example, SAP Enterprise Services, BAPIs, Idocs, RFCs and ABAP calls or other user exits) and may be made available through SAP or third party products, SDKs, documentation or other media.
+
+## Subcomponents
+
+This project includes the following subcomponents that are subject to separate license terms.
+Your use of these subcomponents is subject to the separate license terms applicable to
+each subcomponent.
+
+API.
+https://git.k8s.io/api.
+Copyright 2017 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/api/LICENSE).
+
+APIMachinery.
+https://git.k8s.io/apimachinery.
+Copyright 2017 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/apimachinery/LICENSE).
+
+APIServer.
+https://git.k8s.io/apiserver.
+Copyright 2017 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/apiserver/LICENSE).
+
+Client-Go.
+https://git.k8s.io/client-go.
+Copyright 2017 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/client-go/LICENSE).
+
+Code-Generator.
+https://git.k8s.io/code-generator.
+Copyright 2017 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/code-generator/LICENSE)
+
+Gengo.
+https://git.k8s.io/gengo.
+Copyright 2017 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/gengo/LICENSE)
+
+Helm.
+https://git.k8s.io/helm.
+Copyright 2017 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/helm/LICENSE)
+
+controller-runtime
+https://github.com/kubernetes-sigs/controller-runtime
+Copyright 2018 The Kubernetes Authors.
+Apache 2 license (https://github.com/kubernetes-sigs/controller-runtime/blob/master/LICENSE).
+
+Cobra
+https://github.com/spf13/cobra.
+Copyright 2015 Steve Francia.
+Apache 2 license (https://github.com/spf13/cobra/blob/master/LICENSE.txt)
+
+YAML marshaling and unmarshaling support for Go
+https://github.com/ghodss/yaml.
+Copyright (c) 2014 Sam Ghods
+MIT license (https://github.com/ghodss/yaml/blob/master/LICENSE)
+
+Logrus
+https://github.com/sirupsen/logrus.
+Copyright (c) 2014 Simon Eskildsen
+MIT license (https://github.com/sirupsen/logrus/blob/master/LICENSE)
+
+Prometheus Go Client Library.
+https://github.com/prometheus/client_golang.
+Copyright 2015 The Prometheus Authors.
+Apache 2 license (https://github.com/prometheus/client_golang/blob/master/LICENSE)
+
+SemVer
+https://github.com/Masterminds/semver.
+Copyright (C) 2014-2015, Matt Butcher and Matt Farina
+MIT license (https://github.com/Masterminds/semver/blob/master/LICENSE.txt)
+
+Cron.
+https://github.com/robfig/cron.
+Copyright (C) 2012 Rob Figueiredo.
+MIT license (https://github.com/robfig/cron/blob/master/LICENSE)
+
+errors
+https://github.com/pkg/errors
+Copyright (c) 2015, Dave Cheney.
+BSD 2-Clause "Simplified" License (https://github.com/pkg/errors/blob/master/LICENSE)
+
+go-multierror
+https://github.com/hashicorp/go-multierror
+Copyright (c) 2015, Hashicorp.
+MPL-2.0 (https://github.com/hashicorp/go-multierror/blob/master/LICENSE)
+
+go-jmespath
+https://github.com/jmespath/go-jmespath
+Copyright 2015 James Saryerwinnie
+Apache License, Version 2 (https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
+
+------
+## MIT License
+
+ The MIT License (MIT)
+
+ Copyright (c)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+------
+## MPL-2.0 License
+
+ Mozilla Public License Version 2.0
+ ==================================
+
+ 1. Definitions
+ --------------
+
+ 1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+ 1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+ 1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+ 1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+ 1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+ 1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+ 1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+ 1.8. "License"
+ means this document.
+
+ 1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+ 1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+ 1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+ 1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+ 1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+ 1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+ 2. License Grants and Conditions
+ --------------------------------
+
+ 2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ (a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ (b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+ 2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+ 2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ (a) for any code that a Contributor has removed from Covered Software;
+ or
+
+ (b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ (c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+ 2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+ 2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights
+ to grant the rights to its Contributions conveyed by this License.
+
+ 2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+ 2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+ in Section 2.1.
+
+ 3. Responsibilities
+ -------------------
+
+ 3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+ 3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ (a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+ (b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+ 3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+ 3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty,
+ or limitations of liability) contained within the Source Code Form of
+ the Covered Software, except that You may alter any license notices to
+ the extent required to remedy known factual inaccuracies.
+
+ 3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+ 4. Inability to Comply Due to Statute or Regulation
+ ---------------------------------------------------
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Software due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description must
+ be placed in a text file included with all distributions of the Covered
+ Software under this License. Except to the extent prohibited by statute
+ or regulation, such description must be sufficiently detailed for a
+ recipient of ordinary skill to be able to understand it.
+
+ 5. Termination
+ --------------
+
+ 5.1. The rights granted under this License will terminate automatically
+ if You fail to comply with any of its terms. However, if You become
+ compliant, then the rights granted under this License from a particular
+ Contributor are reinstated (a) provisionally, unless and until such
+ Contributor explicitly and finally terminates Your grants, and (b) on an
+ ongoing basis, if such Contributor fails to notify You of the
+ non-compliance by some reasonable means prior to 60 days after You have
+ come back into compliance. Moreover, Your grants from a particular
+ Contributor are reinstated on an ongoing basis if such Contributor
+ notifies You of the non-compliance by some reasonable means, this is the
+ first time You have received notice of non-compliance with this License
+ from such Contributor, and You become compliant prior to 30 days after
+ Your receipt of the notice.
+
+ 5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+ 5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+ end user license agreements (excluding distributors and resellers) which
+ have been validly granted by You or Your distributors under this License
+ prior to termination shall survive termination.
+
+ ************************************************************************
+ * *
+ * 6. Disclaimer of Warranty *
+ * ------------------------- *
+ * *
+ * Covered Software is provided under this License on an "as is" *
+ * basis, without warranty of any kind, either expressed, implied, or *
+ * statutory, including, without limitation, warranties that the *
+ * Covered Software is free of defects, merchantable, fit for a *
+ * particular purpose or non-infringing. The entire risk as to the *
+ * quality and performance of the Covered Software is with You. *
+ * Should any Covered Software prove defective in any respect, You *
+ * (not any Contributor) assume the cost of any necessary servicing, *
+ * repair, or correction. This disclaimer of warranty constitutes an *
+ * essential part of this License. No use of any Covered Software is *
+ * authorized under this License except under this disclaimer. *
+ * *
+ ************************************************************************
+
+ ************************************************************************
+ * *
+ * 7. Limitation of Liability *
+ * -------------------------- *
+ * *
+ * Under no circumstances and under no legal theory, whether tort *
+ * (including negligence), contract, or otherwise, shall any *
+ * Contributor, or anyone who distributes Covered Software as *
+ * permitted above, be liable to You for any direct, indirect, *
+ * special, incidental, or consequential damages of any character *
+ * including, without limitation, damages for lost profits, loss of *
+ * goodwill, work stoppage, computer failure or malfunction, or any *
+ * and all other commercial damages or losses, even if such party *
+ * shall have been informed of the possibility of such damages. This *
+ * limitation of liability shall not apply to liability for death or *
+ * personal injury resulting from such party's negligence to the *
+ * extent applicable law prohibits such limitation. Some *
+ * jurisdictions do not allow the exclusion or limitation of *
+ * incidental or consequential damages, so this exclusion and *
+ * limitation may not apply to You. *
+ * *
+ ************************************************************************
+
+ 8. Litigation
+ -------------
+
+ Any litigation relating to this License may be brought only in the
+ courts of a jurisdiction where the defendant maintains its principal
+ place of business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions.
+ Nothing in this Section shall prevent a party's ability to bring
+ cross-claims or counter-claims.
+
+ 9. Miscellaneous
+ ----------------
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides
+ that the language of a contract shall be construed against the drafter
+ shall not be used to construe this License against a Contributor.
+
+ 10. Versions of the License
+ ---------------------------
+
+ 10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+ 10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+ 10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+ 10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses
+
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+ Exhibit A - Source Code Form License Notice
+ -------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ If it is not possible or desirable to put the notice in a particular
+ file, then You may include the notice in a location (such as a LICENSE
+ file in a relevant directory) where a recipient would be likely to look
+ for such a notice.
+
+ You may add additional accurate notices of copyright ownership.
+
+ Exhibit B - "Incompatible With Secondary Licenses" Notice
+ ---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
+
+------
+## BSD 2-Clause "Simplified" License
+
+ Copyright
+
+ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/gardener/gardener/NOTICE.md b/vendor/github.com/gardener/gardener/NOTICE.md
new file mode 100644
index 0000000..d6e6ad8
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/NOTICE.md
@@ -0,0 +1,33 @@
+## Gardener
+Copyright (c) 2017-2019 SAP SE or an SAP affiliate company. All rights reserved.
+
+## Seed Source
+
+The source code of this component was seeded based on a copy of the following files from [github.com/kubernetes](https://github.com/kubernetes):
+
+Sample APIServer.
+http://git.k8s.io/sample-apiserver.
+Copyright 2017 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/sample-apiserver/LICENSE).
+
+Release: 1.9.
+Commit-ID: 7c9967f6de296b968505885781e9ed9fc65156c3
+Commit-Message: Merge remote-tracking branch 'origin/master' into release-1.9
+
+Kubernetes.
+https://git.k8s.io/kubernetes.
+Copyright 2017 The Kubernetes Authors.
+Apache 2 license (https://git.k8s.io/kubernetes/LICENSE).
+
+The following charts of the [`charts`](charts) directory were seeded based on a copy of the following origins:
+
+* [git.k8s.io/charts/stable/kube-state-metrics](https://git.k8s.io/charts/stable/kube-state-metrics)
+* [git.k8s.io/charts/stable/nginx-ingress](https://git.k8s.io/charts/stable/nginx-ingress)
+* [git.k8s.io/charts/stable/prometheus](https://git.k8s.io/charts/stable/prometheus)
+* [git.k8s.io/charts/stable/kubernetes-dashboard](https://git.k8s.io/charts/stable/kubernetes-dashboard)
+* [github.com/grafana/loki/blob/master/production/helm/loki/templates/statefulset.yaml](https://github.com/grafana/loki/blob/master/production/helm/loki/templates/statefulset.yaml)
+* [github.com/grafana/loki/blob/master/production/helm/loki/templates/service.yaml](https://github.com/grafana/loki/blob/master/production/helm/loki/templates/service.yaml)
+
+## Container Images
+
+The list of container images and versions deployed by the Gardener can be found [here](charts/images.yaml).
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/cluster.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/cluster.go
new file mode 100644
index 0000000..8430ead
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/cluster.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import "github.com/gardener/gardener/pkg/extensions"
+
+// Cluster contains the decoded resources of Gardener's extension Cluster resource.
+type Cluster = extensions.Cluster
+
+var (
+ // NewGardenDecoder returns a new Garden API decoder.
+ NewGardenDecoder = extensions.NewGardenDecoder
+ // GetCluster tries to read Gardener's Cluster extension resource in the given namespace.
+ GetCluster = extensions.GetCluster
+ // CloudProfileFromCluster returns the CloudProfile resource inside the Cluster resource.
+ CloudProfileFromCluster = extensions.CloudProfileFromCluster
+ // SeedFromCluster returns the Seed resource inside the Cluster resource.
+ SeedFromCluster = extensions.SeedFromCluster
+ // ShootFromCluster returns the Shoot resource inside the Cluster resource.
+ ShootFromCluster = extensions.ShootFromCluster
+ // GetShoot tries to read Gardener's Cluster extension resource in the given namespace and return the embedded Shoot resource.
+ GetShoot = extensions.GetShoot
+)
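
These aliases let extension controllers keep importing `extensions/pkg/controller` while the implementation lives in `pkg/extensions`. A hedged sketch of typical use follows; the `GetCluster(ctx, client, namespace)` signature and the decoded `Shoot` field on `Cluster` are assumptions based on the doc comments above, not spelled out in this patch:

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/gardener/gardener/extensions/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// shootName looks up the Shoot embedded in the Cluster resource of the given shoot namespace.
// The exact signature of controller.GetCluster is assumed as described in the lead-in above.
func shootName(ctx context.Context, c client.Client, namespace string) (string, error) {
	cluster, err := controller.GetCluster(ctx, c, namespace)
	if err != nil {
		return "", err
	}
	if cluster.Shoot == nil {
		return "", fmt.Errorf("no shoot found in cluster resource of namespace %q", namespace)
	}
	return cluster.Shoot.Name, nil
}
```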
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/cmd.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/cmd.go
new file mode 100644
index 0000000..27f4ce9
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/cmd.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "os"
+
+ "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+var (
+ // Log is log.Log. Exposed for testing.
+ Log = log.Log
+ // Exit calls os.Exit. Exposed for testing.
+ Exit = os.Exit
+)
+
+// LogErrAndExit logs the given error with msg and keysAndValues and calls `os.Exit(1)`.
+func LogErrAndExit(err error, msg string, keysAndValues ...interface{}) {
+ Log.Error(err, msg, keysAndValues...)
+ Exit(1)
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/options.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/options.go
new file mode 100644
index 0000000..ca216f5
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/options.go
@@ -0,0 +1,396 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+
+ extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller"
+
+ "github.com/spf13/pflag"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+const (
+ // LeaderElectionFlag is the name of the command line flag to specify whether to do leader election or not.
+ LeaderElectionFlag = "leader-election"
+ // LeaderElectionIDFlag is the name of the command line flag to specify the leader election ID.
+ LeaderElectionIDFlag = "leader-election-id"
+ // LeaderElectionNamespaceFlag is the name of the command line flag to specify the leader election namespace.
+ LeaderElectionNamespaceFlag = "leader-election-namespace"
+ // WebhookServerHostFlag is the name of the command line flag to specify the webhook config host for 'url' mode.
+ WebhookServerHostFlag = "webhook-config-server-host"
+ // WebhookServerPortFlag is the name of the command line flag to specify the webhook server port.
+ WebhookServerPortFlag = "webhook-config-server-port"
+ // WebhookCertDirFlag is the name of the command line flag to specify the webhook certificate directory.
+ WebhookCertDirFlag = "webhook-config-cert-dir"
+
+ // MaxConcurrentReconcilesFlag is the name of the command line flag to specify the maximum number of
+ // concurrent reconciliations a controller can do.
+ MaxConcurrentReconcilesFlag = "max-concurrent-reconciles"
+
+ // KubeconfigFlag is the name of the command line flag to specify a kubeconfig used to retrieve
+ // a rest.Config for a manager.Manager.
+ KubeconfigFlag = clientcmd.RecommendedConfigPathFlag
+ // MasterURLFlag is the name of the command line flag to specify the master URL override for
+ // a rest.Config of a manager.Manager.
+ MasterURLFlag = "master"
+
+ // DisableFlag is the name of the command line flag to disable individual controllers.
+ DisableFlag = "disable-controllers"
+)
+
+// LeaderElectionNameID returns a leader election ID for the given name.
+func LeaderElectionNameID(name string) string {
+ return fmt.Sprintf("%s-leader-election", name)
+}
+
+// Flagger adds flags to a given FlagSet.
+type Flagger interface {
+ // AddFlags adds the flags of this Flagger to the given FlagSet.
+ AddFlags(*pflag.FlagSet)
+}
+
+type prefixedFlagger struct {
+ prefix string
+ flagger Flagger
+}
+
+// AddFlags implements Flagger.AddFlags.
+func (p *prefixedFlagger) AddFlags(fs *pflag.FlagSet) {
+ temp := pflag.NewFlagSet("", pflag.ExitOnError)
+ p.flagger.AddFlags(temp)
+ temp.VisitAll(func(flag *pflag.Flag) {
+ flag.Name = fmt.Sprintf("%s%s", p.prefix, flag.Name)
+ })
+ fs.AddFlagSet(temp)
+}
+
+// PrefixFlagger creates a flagger that prefixes all its flags with the given prefix.
+func PrefixFlagger(prefix string, flagger Flagger) Flagger {
+ return &prefixedFlagger{prefix, flagger}
+}
+
+// PrefixOption creates an option that prefixes all its flags with the given prefix.
+func PrefixOption(prefix string, option Option) Option {
+ return struct {
+ Flagger
+ Completer
+ }{PrefixFlagger(prefix, option), option}
+}
+
+// Completer completes some work.
+type Completer interface {
+ // Complete completes the work, optionally returning an error.
+ Complete() error
+}
+
+// Option is a Flagger and Completer.
+// It sets command line flags and does some work when the flags have been parsed, optionally producing
+// an error.
+type Option interface {
+ Flagger
+ Completer
+}
+
+// OptionAggregator is a builder that aggregates multiple options.
+type OptionAggregator []Option
+
+// NewOptionAggregator instantiates a new OptionAggregator and registers all given options.
+func NewOptionAggregator(options ...Option) OptionAggregator {
+ var builder OptionAggregator
+ builder.Register(options...)
+ return builder
+}
+
+// Register registers the given options in this OptionAggregator.
+func (b *OptionAggregator) Register(options ...Option) {
+ *b = append(*b, options...)
+}
+
+// AddFlags implements Flagger.AddFlags.
+func (b *OptionAggregator) AddFlags(fs *pflag.FlagSet) {
+ for _, option := range *b {
+ option.AddFlags(fs)
+ }
+}
+
+// Complete implements Completer.Complete.
+func (b *OptionAggregator) Complete() error {
+ for _, option := range *b {
+ if err := option.Complete(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ManagerOptions are command line options that can be set for manager.Options.
+type ManagerOptions struct {
+ // LeaderElection is whether leader election is turned on or not.
+ LeaderElection bool
+ // LeaderElectionID is the id to do leader election with.
+ LeaderElectionID string
+ // LeaderElectionNamespace is the namespace to do leader election in.
+ LeaderElectionNamespace string
+ // WebhookServerHost is the host for the webhook server.
+ WebhookServerHost string
+ // WebhookServerPort is the port for the webhook server.
+ WebhookServerPort int
+ // WebhookCertDir is the directory that contains the webhook server key and certificate.
+ WebhookCertDir string
+
+ config *ManagerConfig
+}
+
+// AddFlags implements Flagger.AddFlags.
+func (m *ManagerOptions) AddFlags(fs *pflag.FlagSet) {
+ fs.BoolVar(&m.LeaderElection, LeaderElectionFlag, m.LeaderElection, "Whether to use leader election or not when running this controller manager.")
+ fs.StringVar(&m.LeaderElectionID, LeaderElectionIDFlag, m.LeaderElectionID, "The leader election id to use.")
+ fs.StringVar(&m.LeaderElectionNamespace, LeaderElectionNamespaceFlag, m.LeaderElectionNamespace, "The namespace to do leader election in.")
+ fs.StringVar(&m.WebhookServerHost, WebhookServerHostFlag, m.WebhookServerHost, "The webhook server host.")
+ fs.IntVar(&m.WebhookServerPort, WebhookServerPortFlag, m.WebhookServerPort, "The webhook server port.")
+ fs.StringVar(&m.WebhookCertDir, WebhookCertDirFlag, m.WebhookCertDir, "The directory that contains the webhook server key and certificate.")
+}
+
+// Complete implements Completer.Complete.
+func (m *ManagerOptions) Complete() error {
+ m.config = &ManagerConfig{m.LeaderElection, m.LeaderElectionID, m.LeaderElectionNamespace, m.WebhookServerHost, m.WebhookServerPort, m.WebhookCertDir}
+ return nil
+}
+
+// Completed returns the completed ManagerConfig. Only call this if `Complete` was successful.
+func (m *ManagerOptions) Completed() *ManagerConfig {
+ return m.config
+}
+
+// ManagerConfig is a completed manager configuration.
+type ManagerConfig struct {
+ // LeaderElection is whether leader election is turned on or not.
+ LeaderElection bool
+ // LeaderElectionID is the id to do leader election with.
+ LeaderElectionID string
+ // LeaderElectionNamespace is the namespace to do leader election in.
+ LeaderElectionNamespace string
+ // WebhookServerHost is the host for the webhook server.
+ WebhookServerHost string
+ // WebhookServerPort is the port for the webhook server.
+ WebhookServerPort int
+ // WebhookCertDir is the directory that contains the webhook server key and certificate.
+ WebhookCertDir string
+}
+
+// Apply sets the values of this ManagerConfig in the given manager.Options.
+func (c *ManagerConfig) Apply(opts *manager.Options) {
+ opts.LeaderElection = c.LeaderElection
+ opts.LeaderElectionID = c.LeaderElectionID
+ opts.LeaderElectionNamespace = c.LeaderElectionNamespace
+ opts.Host = c.WebhookServerHost
+ opts.Port = c.WebhookServerPort
+ opts.CertDir = c.WebhookCertDir
+}
+
+// Options initializes empty manager.Options, applies the set values and returns it.
+func (c *ManagerConfig) Options() manager.Options {
+ var opts manager.Options
+ c.Apply(&opts)
+ return opts
+}
+
+// ControllerOptions are command line options that can be set for controller.Options.
+type ControllerOptions struct {
+ // MaxConcurrentReconciles is the maximum number of concurrent reconciles.
+ MaxConcurrentReconciles int
+
+ config *ControllerConfig
+}
+
+// AddFlags implements Flagger.AddFlags.
+func (c *ControllerOptions) AddFlags(fs *pflag.FlagSet) {
+ fs.IntVar(&c.MaxConcurrentReconciles, MaxConcurrentReconcilesFlag, c.MaxConcurrentReconciles, "The maximum number of concurrent reconciliations.")
+}
+
+// Complete implements Completer.Complete.
+func (c *ControllerOptions) Complete() error {
+ c.config = &ControllerConfig{c.MaxConcurrentReconciles}
+ return nil
+}
+
+// Completed returns the completed ControllerConfig. Only call this if `Complete` was successful.
+func (c *ControllerOptions) Completed() *ControllerConfig {
+ return c.config
+}
+
+// ControllerConfig is a completed controller configuration.
+type ControllerConfig struct {
+ // MaxConcurrentReconciles is the maximum number of concurrent reconciles.
+ MaxConcurrentReconciles int
+}
+
+// Apply sets the values of this ControllerConfig in the given controller.Options.
+func (c *ControllerConfig) Apply(opts *controller.Options) {
+ opts.MaxConcurrentReconciles = c.MaxConcurrentReconciles
+}
+
+// Options initializes empty controller.Options, applies the set values and returns it.
+func (c *ControllerConfig) Options() controller.Options {
+ var opts controller.Options
+ c.Apply(&opts)
+ return opts
+}
+
+// RESTOptions are command line options that can be set for rest.Config.
+type RESTOptions struct {
+ // Kubeconfig is the path to a kubeconfig.
+ Kubeconfig string
+ // MasterURL is an override for the URL in a kubeconfig. Only used if out-of-cluster.
+ MasterURL string
+
+ config *RESTConfig
+}
+
+// RESTConfig is a completed REST configuration.
+type RESTConfig struct {
+ // Config is the rest.Config.
+ Config *rest.Config
+}
+
+var (
+ // BuildConfigFromFlags builds a rest.Config from the given master URL and kubeconfig path. Exposed for testing.
+ BuildConfigFromFlags = clientcmd.BuildConfigFromFlags
+ // InClusterConfig obtains the current in-cluster config. Exposed for testing.
+ InClusterConfig = rest.InClusterConfig
+ // Getenv obtains the environment variable with the given name. Exposed for testing.
+ Getenv = os.Getenv
+ // RecommendedHomeFile is the recommended location of the kubeconfig. Exposed for testing.
+ RecommendedHomeFile = clientcmd.RecommendedHomeFile
+)
+
+func (r *RESTOptions) buildConfig() (*rest.Config, error) {
+ // If a flag is specified with the config location, use that
+ if len(r.Kubeconfig) > 0 {
+ return BuildConfigFromFlags(r.MasterURL, r.Kubeconfig)
+ }
+ // If an env variable is specified with the config location, use that
+ if kubeconfig := Getenv(clientcmd.RecommendedConfigPathEnvVar); len(kubeconfig) > 0 {
+ return BuildConfigFromFlags(r.MasterURL, kubeconfig)
+ }
+ // If no explicit location, try the in-cluster config
+ if c, err := InClusterConfig(); err == nil {
+ return c, nil
+ }
+
+ return BuildConfigFromFlags("", RecommendedHomeFile)
+}
+
+// Complete implements RESTCompleter.Complete.
+func (r *RESTOptions) Complete() error {
+ config, err := r.buildConfig()
+ if err != nil {
+ return err
+ }
+
+ r.config = &RESTConfig{config}
+ return nil
+}
+
+// Completed returns the completed RESTConfig. Only call this if `Complete` was successful.
+func (r *RESTOptions) Completed() *RESTConfig {
+ return r.config
+}
+
+// AddFlags implements Flagger.AddFlags.
+func (r *RESTOptions) AddFlags(fs *pflag.FlagSet) {
+ fs.StringVar(&r.Kubeconfig, KubeconfigFlag, "", "Path to a kubeconfig. Only required if out-of-cluster.")
+ fs.StringVar(&r.MasterURL, MasterURLFlag, "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
+}
+
+// SwitchOptions are options to build an AddToManager function that filters the disabled controllers.
+type SwitchOptions struct {
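+ // Disabled is the list of names of controllers that are disabled.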
+ Disabled []string
+
+ nameToAddToManager map[string]func(manager.Manager) error
+ addToManagerBuilder extensionscontroller.AddToManagerBuilder
+}
+
+// Register registers the given NameToAddToManagerFuncs in the options.
+func (d *SwitchOptions) Register(pairs ...NameToAddToManagerFunc) {
+ for _, pair := range pairs {
+ d.nameToAddToManager[pair.Name] = pair.Func
+ }
+}
+
+// NameToAddToManagerFunc binds a specific name to a controller's AddToManager function.
+type NameToAddToManagerFunc struct {
+ Name string
+ Func func(manager.Manager) error
+}
+
+// Switch binds the given name to the given AddToManager function.
+func Switch(name string, f func(manager.Manager) error) NameToAddToManagerFunc {
+ return NameToAddToManagerFunc{
+ Name: name,
+ Func: f,
+ }
+}
+
+// NewSwitchOptions creates new SwitchOptions with the given initial pairs.
+func NewSwitchOptions(pairs ...NameToAddToManagerFunc) *SwitchOptions {
+ opts := SwitchOptions{nameToAddToManager: make(map[string]func(manager.Manager) error)}
+ opts.Register(pairs...)
+ return &opts
+}
+
+// AddFlags implements Option.
+func (d *SwitchOptions) AddFlags(fs *pflag.FlagSet) {
+ controllerNames := make([]string, 0, len(d.nameToAddToManager))
+ for name := range d.nameToAddToManager {
+ controllerNames = append(controllerNames, name)
+ }
+ fs.StringSliceVar(&d.Disabled, DisableFlag, d.Disabled, fmt.Sprintf("List of controllers to disable %v", controllerNames))
+}
+
+// Complete implements Option.
+func (d *SwitchOptions) Complete() error {
+ disabled := sets.NewString()
+ for _, disabledName := range d.Disabled {
+ if _, ok := d.nameToAddToManager[disabledName]; !ok {
+ return fmt.Errorf("cannot disable unknown controller %q", disabledName)
+ }
+ disabled.Insert(disabledName)
+ }
+
+ for name, addToManager := range d.nameToAddToManager {
+ if !disabled.Has(name) {
+ d.addToManagerBuilder.Register(addToManager)
+ }
+ }
+ return nil
+}
+
+// Completed returns the completed SwitchConfig. Only call this if `Complete` was successful.
+func (d *SwitchOptions) Completed() *SwitchConfig {
+ return &SwitchConfig{d.addToManagerBuilder.AddToManager}
+}
+
+// SwitchConfig is the completed configuration of SwitchOptions.
+type SwitchConfig struct {
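+ // AddToManager adds all controllers that were not disabled to the given manager.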
+ AddToManager func(manager.Manager) error
+}
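
Taken together, these option types form the command-line surface of an extension binary: each option contributes flags, `Complete` turns the parsed flags into immutable `*Config` values, and the completed configs are applied when the manager and controllers are built. A minimal wiring sketch under stated assumptions: the `fleet` switch name, the `gardener-extension-example` leader-election ID, and `addFleetControllerToManager` are placeholders, and everything else uses only the types shown above plus pflag and controller-runtime:

```go
package main

import (
	controllercmd "github.com/gardener/gardener/extensions/pkg/controller/cmd"

	"github.com/spf13/pflag"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)

// addFleetControllerToManager stands in for the extension's real AddToManager function.
func addFleetControllerToManager(_ manager.Manager) error { return nil }

func main() {
	var (
		restOpts = &controllercmd.RESTOptions{}
		mgrOpts  = &controllercmd.ManagerOptions{
			LeaderElection:   true,
			LeaderElectionID: controllercmd.LeaderElectionNameID("gardener-extension-example"),
		}
		ctrlOpts   = &controllercmd.ControllerOptions{MaxConcurrentReconciles: 5}
		switchOpts = controllercmd.NewSwitchOptions(
			controllercmd.Switch("fleet", addFleetControllerToManager),
		)

		aggOpts = controllercmd.NewOptionAggregator(
			restOpts,
			mgrOpts,
			controllercmd.PrefixOption("fleet-", ctrlOpts),
			switchOpts,
		)
	)

	// Register all flags (e.g. --kubeconfig, --leader-election,
	// --fleet-max-concurrent-reconciles, --disable-controllers) and parse them.
	aggOpts.AddFlags(pflag.CommandLine)
	pflag.Parse()

	// Complete all options; for RESTOptions this resolves the rest.Config
	// (flag, env var, in-cluster config, recommended home file, in that order).
	if err := aggOpts.Complete(); err != nil {
		controllercmd.LogErrAndExit(err, "error completing options")
	}

	mgr, err := manager.New(restOpts.Completed().Config, mgrOpts.Completed().Options())
	if err != nil {
		controllercmd.LogErrAndExit(err, "could not instantiate manager")
	}

	// Add all controllers that were not disabled via --disable-controllers.
	if err := switchOpts.Completed().AddToManager(mgr); err != nil {
		controllercmd.LogErrAndExit(err, "could not add controllers to manager")
	}

	// Start blocks until the signal handler fires; the exact Start signature
	// depends on the controller-runtime version in use.
	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
		controllercmd.LogErrAndExit(err, "error running manager")
	}
}
```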
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/reconciler_options.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/reconciler_options.go
new file mode 100644
index 0000000..d210c66
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/reconciler_options.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "github.com/spf13/pflag"
+)
+
+const (
+ // IgnoreOperationAnnotationFlag is the name of the command line flag to specify whether the operation annotation
+ // is ignored or not.
+ IgnoreOperationAnnotationFlag = "ignore-operation-annotation"
+)
+
+// ReconcilerOptions are command line options that can be set for controller.Options.
+type ReconcilerOptions struct {
+ // IgnoreOperationAnnotation defines whether to ignore the operation annotation or not.
+ IgnoreOperationAnnotation bool
+
+ config *ReconcilerConfig
+}
+
+// AddFlags implements Flagger.AddFlags.
+func (c *ReconcilerOptions) AddFlags(fs *pflag.FlagSet) {
+ fs.BoolVar(&c.IgnoreOperationAnnotation, IgnoreOperationAnnotationFlag, c.IgnoreOperationAnnotation, "If set, the operation annotation is ignored.")
+}
+
+// Complete implements Completer.Complete.
+func (c *ReconcilerOptions) Complete() error {
+ c.config = &ReconcilerConfig{c.IgnoreOperationAnnotation}
+ return nil
+}
+
+// Completed returns the completed ReconcilerConfig. Only call this if `Complete` was successful.
+func (c *ReconcilerOptions) Completed() *ReconcilerConfig {
+ return c.config
+}
+
+// ReconcilerConfig is a completed controller configuration.
+type ReconcilerConfig struct {
+ // IgnoreOperationAnnotation defines whether to ignore the operation annotation or not.
+ IgnoreOperationAnnotation bool
+}
+
+// Apply sets the values of this ReconcilerConfig in the given controller.Options.
+func (c *ReconcilerConfig) Apply(ignore *bool) {
+ *ignore = c.IgnoreOperationAnnotation
+}
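+
+// The following is an illustrative sketch (hypothetical function, not part of the upstream file):
+// it shows the typical Complete/Completed/Apply flow for ReconcilerOptions in an extension's
+// command setup. Flag parsing itself is assumed to happen in the caller.
+func exampleReconcilerOptionsUsage(fs *pflag.FlagSet) (bool, error) {
+	opts := &ReconcilerOptions{}
+	opts.AddFlags(fs)
+	// ... the caller parses the flag set here ...
+	if err := opts.Complete(); err != nil {
+		return false, err
+	}
+	var ignoreOperationAnnotation bool
+	opts.Completed().Apply(&ignoreOperationAnnotation)
+	return ignoreOperationAnnotation, nil
+}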
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/error/requeue_error.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/error/requeue_error.go
new file mode 100644
index 0000000..8034268
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/error/requeue_error.go
@@ -0,0 +1,37 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package error
+
+import (
+ "fmt"
+ "time"
+)
+
+// RequeueAfterError is an error that indicates that an actuator wants a reconcile operation
+// to be requeued again after RequeueAfter has passed.
+type RequeueAfterError struct {
+ // Cause is an optional cause that may be returned together with a time for requeueing.
+ Cause error
+ // RequeueAfter is the duration after which the request should be enqueued again.
+ RequeueAfter time.Duration
+}
+
+func (e *RequeueAfterError) Error() string {
+ if e.Cause == nil {
+ return fmt.Sprintf("requeue in %s", e.RequeueAfter)
+ }
+
+ return fmt.Sprintf("requeue in %s due to %+v", e.RequeueAfter, e.Cause)
+}
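+
+// The following is an illustrative sketch (hypothetical helper, not part of the upstream file):
+// an actuator can return a *RequeueAfterError to ask the extension controller to requeue the
+// request after the given duration instead of treating the error as fatal.
+func requeueInOneMinute(cause error) error {
+	return &RequeueAfterError{
+		Cause:        cause,
+		RequeueAfter: time.Minute,
+	}
+}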
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/extension/actuator.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/extension/actuator.go
new file mode 100644
index 0000000..7a74016
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/extension/actuator.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package extension
+
+import (
+ "context"
+
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+)
+
+// Actuator acts upon Extension resources.
+type Actuator interface {
+ // Reconcile the Extension resource.
+ Reconcile(ctx context.Context, ex *extensionsv1alpha1.Extension) error
+ // Delete the Extension resource.
+ Delete(ctx context.Context, ex *extensionsv1alpha1.Extension) error
+ // Restore the Extension resource.
+ Restore(ctx context.Context, ex *extensionsv1alpha1.Extension) error
+ // Migrate the Extension resource.
+ Migrate(ctx context.Context, ex *extensionsv1alpha1.Extension) error
+}
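+
+// The following is an illustrative sketch (not part of the upstream file): a minimal no-op
+// Actuator implementation. Real actuators deploy, restore, migrate and clean up the resources
+// managed by the extension in these methods.
+type noopActuator struct{}
+
+func (a *noopActuator) Reconcile(ctx context.Context, ex *extensionsv1alpha1.Extension) error {
+	return nil
+}
+
+func (a *noopActuator) Delete(ctx context.Context, ex *extensionsv1alpha1.Extension) error {
+	return nil
+}
+
+func (a *noopActuator) Restore(ctx context.Context, ex *extensionsv1alpha1.Extension) error {
+	return nil
+}
+
+func (a *noopActuator) Migrate(ctx context.Context, ex *extensionsv1alpha1.Extension) error {
+	return nil
+}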
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/extension/mapper.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/extension/mapper.go
new file mode 100644
index 0000000..e479803
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/extension/mapper.go
@@ -0,0 +1,29 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package extension
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+
+ extensionshandler "github.com/gardener/gardener/extensions/pkg/handler"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+)
+
+// ClusterToExtensionMapper returns a mapper that returns requests for Extensions whose
+// referenced clusters have been modified.
+func ClusterToExtensionMapper(predicates ...predicate.Predicate) extensionshandler.Mapper {
+ return extensionshandler.ClusterToObjectMapper(func() client.ObjectList { return &extensionsv1alpha1.ExtensionList{} }, predicates)
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/extension/reconciler.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/extension/reconciler.go
new file mode 100644
index 0000000..c9e17cf
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/extension/reconciler.go
@@ -0,0 +1,320 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package extension
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/go-logr/logr"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/client-go/util/retry"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/runtime/inject"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+
+ extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller"
+ extensionshandler "github.com/gardener/gardener/extensions/pkg/handler"
+ extensionspredicate "github.com/gardener/gardener/extensions/pkg/predicate"
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ gardencorev1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+)
+
+const (
+ // FinalizerPrefix is the prefix name of the finalizer written by this controller.
+ FinalizerPrefix = "extensions.gardener.cloud"
+)
+
+// AddArgs are arguments for adding a controller for Extension resources to a manager.
+type AddArgs struct {
+ // Actuator is an Extension resource actuator.
+ Actuator Actuator
+ // Name is the name of the controller.
+ Name string
+ // FinalizerSuffix is the suffix for the finalizer name.
+ FinalizerSuffix string
+ // ControllerOptions are the controller options used for creating a controller.
+ // The options.Reconciler is always overridden with a reconciler created from the
+ // given actuator.
+ ControllerOptions controller.Options
+ // Predicates are the predicates to use.
+ Predicates []predicate.Predicate
+ // Resync determines the requeue interval.
+ Resync time.Duration
+ // Type is the type of the resource considered for reconciliation.
+ Type string
+ // IgnoreOperationAnnotation specifies whether to ignore the operation annotation or not.
+ // If the annotation is not ignored, the extension controller only reconciles resources that carry
+ // the operation annotation, which is typically set by the Gardenlet during a reconciliation (e.g. in the maintenance time window).
+ IgnoreOperationAnnotation bool
+}
+
+// Add adds an Extension controller to the given manager using the given AddArgs.
+func Add(mgr manager.Manager, args AddArgs) error {
+ args.ControllerOptions.Reconciler = NewReconciler(args)
+ return add(mgr, args)
+}
+
+// DefaultPredicates returns the default predicates for an extension reconciler.
+func DefaultPredicates(ignoreOperationAnnotation bool) []predicate.Predicate {
+ if ignoreOperationAnnotation {
+ return []predicate.Predicate{
+ predicate.GenerationChangedPredicate{},
+ }
+ }
+ return []predicate.Predicate{
+ predicate.Or(
+ extensionspredicate.HasOperationAnnotation(),
+ extensionspredicate.LastOperationNotSuccessful(),
+ extensionspredicate.IsDeleting(),
+ ),
+ extensionspredicate.ShootNotFailed(),
+ }
+}
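+
+// The following is an illustrative sketch (hypothetical names and values, not part of the
+// upstream file): it shows how an extension could wire its actuator into a manager using
+// AddArgs and the default predicates.
+func exampleAdd(mgr manager.Manager, a Actuator) error {
+	return Add(mgr, AddArgs{
+		Actuator:                  a,
+		Name:                      "example-extension-controller",
+		FinalizerSuffix:           "example",
+		Predicates:                DefaultPredicates(false),
+		Type:                      "example",
+		IgnoreOperationAnnotation: false,
+	})
+}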
+
+func add(mgr manager.Manager, args AddArgs) error {
+ ctrl, err := controller.New(args.Name, mgr, args.ControllerOptions)
+ if err != nil {
+ return err
+ }
+
+ predicates := extensionspredicate.AddTypePredicate(args.Predicates, args.Type)
+
+ if args.IgnoreOperationAnnotation {
+ if err := ctrl.Watch(
+ &source.Kind{Type: &extensionsv1alpha1.Cluster{}},
+ extensionshandler.EnqueueRequestsFromMapper(ClusterToExtensionMapper(predicates...), extensionshandler.UpdateWithNew),
+ ); err != nil {
+ return err
+ }
+ }
+
+ return ctrl.Watch(&source.Kind{Type: &extensionsv1alpha1.Extension{}}, &handler.EnqueueRequestForObject{}, predicates...)
+}
+
+// reconciler reconciles Extension resources of Gardener's
+// `extensions.gardener.cloud` API group.
+type reconciler struct {
+ logger logr.Logger
+ actuator Actuator
+ finalizerName string
+
+ client client.Client
+
+ resync time.Duration
+}
+
+// NewReconciler creates a new reconcile.Reconciler that reconciles
+// Extension resources of Gardener's `extensions.gardener.cloud` API group.
+func NewReconciler(args AddArgs) reconcile.Reconciler {
+ logger := log.Log.WithName(args.Name)
+ finalizer := fmt.Sprintf("%s/%s", FinalizerPrefix, args.FinalizerSuffix)
+ return extensionscontroller.OperationAnnotationWrapper(
+ func() client.Object { return &extensionsv1alpha1.Extension{} },
+ &reconciler{
+ logger: logger,
+ actuator: args.Actuator,
+ finalizerName: finalizer,
+ resync: args.Resync,
+ })
+}
+
+// InjectFunc enables dependency injection into the actuator.
+func (r *reconciler) InjectFunc(f inject.Func) error {
+ return f(r.actuator)
+}
+
+// InjectClient injects the controller runtime client into the reconciler.
+func (r *reconciler) InjectClient(client client.Client) error {
+ r.client = client
+ return nil
+}
+
+// Reconcile is the reconciler function that gets executed in case there are new events for `Extension` resources.
+func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+ ex := &extensionsv1alpha1.Extension{}
+ if err := r.client.Get(ctx, request.NamespacedName, ex); err != nil {
+ if apierrors.IsNotFound(err) {
+ return reconcile.Result{}, nil
+ }
+ return reconcile.Result{}, fmt.Errorf("could not fetch Extension resource: %+v", err)
+ }
+
+ var result reconcile.Result
+
+ shoot, err := extensionscontroller.GetShoot(ctx, r.client, request.Namespace)
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+
+ if extensionscontroller.IsShootFailed(shoot) {
+ r.logger.Info("Stop reconciling Extension of failed Shoot.", "namespace", request.Namespace, "name", ex.Name)
+ return reconcile.Result{}, nil
+ }
+
+ operationType := gardencorev1beta1helper.ComputeOperationType(ex.ObjectMeta, ex.Status.LastOperation)
+
+ switch {
+ case extensionscontroller.IsMigrated(ex):
+ return reconcile.Result{}, nil
+ case operationType == gardencorev1beta1.LastOperationTypeMigrate:
+ return r.migrate(ctx, ex)
+ case ex.DeletionTimestamp != nil:
+ return r.delete(ctx, ex)
+ case ex.Annotations[v1beta1constants.GardenerOperation] == v1beta1constants.GardenerOperationRestore:
+ return r.restore(ctx, ex, operationType)
+ default:
+ if result, err = r.reconcile(ctx, ex, operationType); err != nil {
+ return result, err
+ }
+ return reconcile.Result{Requeue: r.resync != 0, RequeueAfter: r.resync}, nil
+ }
+}
+
+func (r *reconciler) reconcile(ctx context.Context, ex *extensionsv1alpha1.Extension, operationType gardencorev1beta1.LastOperationType) (reconcile.Result, error) {
+ if err := extensionscontroller.EnsureFinalizer(ctx, r.client, ex, r.finalizerName); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ if err := r.updateStatusProcessing(ctx, ex, operationType, "Reconciling Extension resource"); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ if err := r.actuator.Reconcile(ctx, ex); err != nil {
+ _ = r.updateStatusError(ctx, extensionscontroller.ReconcileErrCauseOrErr(err), ex, operationType, "Unable to reconcile Extension resource")
+ return extensionscontroller.ReconcileErr(err)
+ }
+
+ if err := r.updateStatusSuccess(ctx, ex, operationType, "Successfully reconciled Extension resource"); err != nil {
+ return reconcile.Result{}, err
+ }
+ return reconcile.Result{}, nil
+}
+
+func (r *reconciler) delete(ctx context.Context, ex *extensionsv1alpha1.Extension) (reconcile.Result, error) {
+ hasFinalizer, err := extensionscontroller.HasFinalizer(ex, r.finalizerName)
+ if err != nil {
+ return reconcile.Result{}, fmt.Errorf("could not instantiate finalizer deletion: %+v", err)
+ }
+
+ if !hasFinalizer {
+ r.logger.Info("Reconciling Extension resource causes a no-op as there is no finalizer.", "extension", ex.Name, "namespace", ex.Namespace)
+ return reconcile.Result{}, nil
+ }
+
+ if err := r.updateStatusProcessing(ctx, ex, gardencorev1beta1.LastOperationTypeDelete, "Deleting Extension resource."); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ if err := r.actuator.Delete(ctx, ex); err != nil {
+ _ = r.updateStatusError(ctx, extensionscontroller.ReconcileErrCauseOrErr(err), ex, gardencorev1beta1.LastOperationTypeDelete, "Error deleting Extension resource")
+ return extensionscontroller.ReconcileErr(err)
+ }
+
+ if err := r.updateStatusSuccess(ctx, ex, gardencorev1beta1.LastOperationTypeDelete, "Successfully deleted Extension resource"); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ if err := extensionscontroller.DeleteFinalizer(ctx, r.client, ex, r.finalizerName); err != nil {
+ return reconcile.Result{}, fmt.Errorf("error removing finalizer from Extension resource: %+v", err)
+ }
+ return reconcile.Result{}, nil
+}
+
+func (r *reconciler) restore(ctx context.Context, ex *extensionsv1alpha1.Extension, operationType gardencorev1beta1.LastOperationType) (reconcile.Result, error) {
+ if err := extensionscontroller.EnsureFinalizer(ctx, r.client, ex, r.finalizerName); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ if err := r.updateStatusProcessing(ctx, ex, operationType, "Restoring Extension resource"); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ if err := r.actuator.Restore(ctx, ex); err != nil {
+ _ = r.updateStatusError(ctx, extensionscontroller.ReconcileErrCauseOrErr(err), ex, operationType, "Unable to restore Extension resource")
+ return extensionscontroller.ReconcileErr(err)
+ }
+
+ if err := r.updateStatusSuccess(ctx, ex, operationType, "Successfully restored Extension resource"); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ // remove operation annotation 'restore'
+ if err := extensionscontroller.RemoveAnnotation(ctx, r.client, ex, v1beta1constants.GardenerOperation); err != nil {
+ return reconcile.Result{}, fmt.Errorf("error removing annotation from Extension resource: %+v", err)
+ }
+
+ return reconcile.Result{}, nil
+}
+
+func (r *reconciler) migrate(ctx context.Context, ex *extensionsv1alpha1.Extension) (reconcile.Result, error) {
+ if err := r.updateStatusProcessing(ctx, ex, gardencorev1beta1.LastOperationTypeMigrate, "Migrating Extension resource"); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ if err := r.actuator.Migrate(ctx, ex); err != nil {
+ _ = r.updateStatusError(ctx, extensionscontroller.ReconcileErrCauseOrErr(err), ex, gardencorev1beta1.LastOperationTypeMigrate, "Error migrating Extension resource")
+ return extensionscontroller.ReconcileErr(err)
+ }
+
+ if err := r.updateStatusSuccess(ctx, ex, gardencorev1beta1.LastOperationTypeMigrate, "Successfully migrated Extension resource"); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ if err := extensionscontroller.DeleteAllFinalizers(ctx, r.client, ex); err != nil {
+ return reconcile.Result{}, fmt.Errorf("error removing all finalizers from Extension resource: %+v", err)
+ }
+
+ // remove operation annotation 'migrate'
+ if err := extensionscontroller.RemoveAnnotation(ctx, r.client, ex, v1beta1constants.GardenerOperation); err != nil {
+ return reconcile.Result{}, fmt.Errorf("error removing annotation from Extension resource: %+v", err)
+ }
+
+ return reconcile.Result{}, nil
+}
+
+func (r *reconciler) updateStatusProcessing(ctx context.Context, ex *extensionsv1alpha1.Extension, lastOperationType gardencorev1beta1.LastOperationType, description string) error {
+ r.logger.Info(description, "extension", ex.Name, "namespace", ex.Namespace)
+ return extensionscontroller.TryUpdateStatus(ctx, retry.DefaultBackoff, r.client, ex, func() error {
+ ex.Status.LastOperation = extensionscontroller.LastOperation(lastOperationType, gardencorev1beta1.LastOperationStateProcessing, 1, description)
+ return nil
+ })
+}
+
+func (r *reconciler) updateStatusError(ctx context.Context, err error, ex *extensionsv1alpha1.Extension, lastOperationType gardencorev1beta1.LastOperationType, description string) error {
+ return extensionscontroller.TryUpdateStatus(ctx, retry.DefaultBackoff, r.client, ex, func() error {
+ ex.Status.ObservedGeneration = ex.Generation
+ ex.Status.LastOperation, ex.Status.LastError = extensionscontroller.ReconcileError(lastOperationType, gardencorev1beta1helper.FormatLastErrDescription(fmt.Errorf("%s: %v", description, err)), 50, gardencorev1beta1helper.ExtractErrorCodes(gardencorev1beta1helper.DetermineError(err, err.Error()))...)
+ return nil
+ })
+}
+
+func (r *reconciler) updateStatusSuccess(ctx context.Context, ex *extensionsv1alpha1.Extension, lastOperationType gardencorev1beta1.LastOperationType, description string) error {
+ r.logger.Info(description, "extension", ex.Name, "namespace", ex.Namespace)
+ return extensionscontroller.TryUpdateStatus(ctx, retry.DefaultBackoff, r.client, ex, func() error {
+ ex.Status.ObservedGeneration = ex.Generation
+ ex.Status.LastOperation, ex.Status.LastError = extensionscontroller.ReconcileSucceeded(lastOperationType, description)
+ return nil
+ })
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/actuator.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/actuator.go
new file mode 100644
index 0000000..6505c17
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/actuator.go
@@ -0,0 +1,137 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package healthcheck
+
+import (
+ "context"
+ "time"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller"
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+)
+
+/*
+ Each extension can register multiple HealthCheckActuators with various HealthChecks for checking the API objects it deploys.
+ Each actuator is responsible for a single extension resource (e.g. Worker) - predicates can be defined for fine-grained control over which objects to watch.
+
+ The health check reconciler triggers the registered actuator to execute the health checks.
+ Afterwards, the reconciler writes Conditions to the extension resource - one Condition per HealthConditionType (e.g. multiple checks that contribute to the HealthConditionType XYZ result in one Condition with .type XYZ).
+ To contribute to the Shoot's health, the Gardener/Gardenlet checks each extension for Conditions containing one of the following HealthConditionTypes: SystemComponentsHealthy, EveryNodeReady, ControlPlaneHealthy.
+ However, extensions are free to choose any HealthConditionType.
+
+ Generic HealthCheck functions for various API objects are provided and can be reused.
+ Many providers deploy Helm charts via managed resources that are picked up by the resource-manager, which makes sure that
+ the Helm chart is applied and all its components (Deployments, StatefulSets, DaemonSets, ...) are healthy.
+ To integrate, the health check controller can also check the health of managed resources.
+
+ More sophisticated checks should be implemented in the extension itself by using the HealthCheck interface.
+*/
+
+// GetExtensionObjectFunc returns the extension object that should be registered with the health check controller.
+// For example: func() extensionsv1alpha1.Object {return &extensionsv1alpha1.Worker{}}
+type GetExtensionObjectFunc = func() extensionsv1alpha1.Object
+
+// GetExtensionObjectListFunc returns the extension object list that should be registered with the health check controller.
+// For example: func() client.ObjectList { return &extensionsv1alpha1.WorkerList{} }
+type GetExtensionObjectListFunc = func() client.ObjectList
+
+// PreCheckFunc checks whether the health check shall be performed based on the given object and cluster.
+type PreCheckFunc = func(runtime.Object, *extensionscontroller.Cluster) bool
+
+// ConditionTypeToHealthCheck registers a HealthCheck for the given ConditionType. If the PreCheckFunc is not nil, it is
+// executed with the given object before the health check is performed. Otherwise, the health check is always
+// performed.
+type ConditionTypeToHealthCheck struct {
+ ConditionType string
+ PreCheckFunc PreCheckFunc
+ HealthCheck HealthCheck
+}
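+
+// The following is an illustrative sketch (hypothetical name, not part of the upstream file):
+// a PreCheckFunc that skips its health check while the Shoot is hibernated, so the check only
+// runs against clusters that are actually supposed to be up.
+func exampleSkipWhenHibernated(_ runtime.Object, cluster *extensionscontroller.Cluster) bool {
+	return !extensionscontroller.IsHibernated(cluster)
+}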
+
+// HealthCheckActuator acts upon registered resources.
+type HealthCheckActuator interface {
+ // ExecuteHealthCheckFunctions is called regularly by the health check controller.
+ // It executes all registered health checks and aggregates the results into one
+ // Result per healthConditionType registered with the individual health checks.
+ // It returns an error if the health checks could not be executed; an error results
+ // in a condition with type "Unknown" and reason "ConditionCheckError".
+ ExecuteHealthCheckFunctions(context.Context, types.NamespacedName) (*[]Result, error)
+}
+
+// Result represents an aggregated health status for the health checks performed on the dependent API Objects of an extension resource.
+// A Result refers to a single healthConditionType (e.g. SystemComponentsHealthy) of an extension resource.
+type Result struct {
+ // HealthConditionType is used as the .type field of the Condition that the HealthCheck controller writes to the extension resource.
+ // To contribute to the Shoot's health, Gardener checks each extension for a health condition type of SystemComponentsHealthy, EveryNodeReady or ControlPlaneHealthy.
+ HealthConditionType string
+ // Status contains the status for the health checks that have been performed for an extension resource.
+ Status gardencorev1beta1.ConditionStatus
+ // Detail contains details for unsuccessful health checks.
+ Detail *string
+ // SuccessfulChecks is the number of successful health checks.
+ SuccessfulChecks int
+ // ProgressingChecks is the number of health checks that were progressing.
+ ProgressingChecks int
+ // UnsuccessfulChecks is the number of health checks that were not successful.
+ UnsuccessfulChecks int
+ // FailedChecks is the number of health checks that could not be performed (e.g. the client could not reach the API server).
+ // It results in a condition with type "Unknown" and reason "ConditionCheckError" for this healthConditionType.
+ FailedChecks int
+ // Codes is an optional list of error codes that were produced by the health checks.
+ Codes []gardencorev1beta1.ErrorCode
+ // ProgressingThreshold is the threshold duration after which a health check that reported the `Progressing` status
+ // shall be transitioned to `False`
+ ProgressingThreshold *time.Duration
+}
+
+// GetDetails returns the details of the health check result
+func (h *Result) GetDetails() string {
+ if h.Detail == nil {
+ return ""
+ }
+ return *h.Detail
+}
+
+// HealthCheck represents a single health check.
+// Each health check gets the shoot and seed clients injected and
+// returns isHealthy, conditionReason, conditionDetail and error.
+// Returning an error means the health check could not be conducted and results in a condition with type "Unknown" and reason "ConditionCheckError".
+type HealthCheck interface {
+ // Check is the function that executes the actual health check
+ Check(context.Context, types.NamespacedName) (*SingleCheckResult, error)
+ // SetLoggerSuffix injects the logger
+ SetLoggerSuffix(string, string)
+ // DeepCopy clones the healthCheck
+ DeepCopy() HealthCheck
+}
+
+// SingleCheckResult is the result for a health check
+type SingleCheckResult struct {
+ // Status contains the status for the health check that has been performed for an extension resource
+ Status gardencorev1beta1.ConditionStatus
+ // Detail contains details for the health check being unsuccessful
+ Detail string
+ // Reason contains the reason for the health check being unsuccessful
+ Reason string
+ // Codes optionally contains a list of error codes related to the health check
+ Codes []gardencorev1beta1.ErrorCode
+ // ProgressingThreshold is the threshold duration after which a health check that reported the `Progressing` status
+ // shall be transitioned to `False`
+ ProgressingThreshold *time.Duration
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config/types.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config/types.go
new file mode 100644
index 0000000..a0d225b
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config/types.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// HealthCheckConfig contains the configuration for the health check controller.
+type HealthCheckConfig struct {
+ // SyncPeriod is the interval in which the registered resources are reconciled, i.e. how
+ // often the health checks for Shoot clusters are performed (only if no operation is
+ // currently running on them).
+ // Defaults to 30 seconds.
+ SyncPeriod metav1.Duration
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/controller.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/controller.go
new file mode 100644
index 0000000..9cb1da3
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/controller.go
@@ -0,0 +1,191 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package healthcheck
+
+import (
+ "fmt"
+
+ healthcheckconfig "github.com/gardener/gardener/extensions/pkg/controller/healthcheck/config"
+ extensionshandler "github.com/gardener/gardener/extensions/pkg/handler"
+ extensionspredicate "github.com/gardener/gardener/extensions/pkg/predicate"
+ "github.com/gardener/gardener/pkg/api/extensions"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+ "github.com/gardener/gardener/pkg/utils"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+const (
+ // ControllerName is the name of the controller.
+ ControllerName = "healthcheck_controller"
+)
+
+// AddArgs are arguments for adding a health check controller to a controller-runtime manager.
+type AddArgs struct {
+ // ControllerOptions are the controller options used for creating a controller.
+ // The options.Reconciler is always overridden with a reconciler created from the
+ // given actuator.
+ ControllerOptions controller.Options
+ // Predicates are the predicates to use.
+ // If unset, GenerationChanged will be used.
+ Predicates []predicate.Predicate
+ // Type is the type of the resource considered for reconciliation.
+ Type string
+ // SyncPeriod is the interval in which the registered extension resource is reconciled.
+ SyncPeriod metav1.Duration
+ // registeredExtension is the registered extension that the HealthCheck Controller watches and writes HealthConditions for.
+ // The Gardenlet reads the conditions on the extension Resource.
+ // Through this mechanism, the extension can contribute to the Shoot's HealthStatus.
+ registeredExtension *RegisteredExtension
+ // GetExtensionObjListFunc returns a client.ObjectList representation of the extension resource to register.
+ GetExtensionObjListFunc GetExtensionObjectListFunc
+}
+
+// DefaultAddArgs are the default Args for the health check controller.
+type DefaultAddArgs struct {
+ // Controller are the controller.Options.
+ Controller controller.Options
+ // HealthCheckConfig contains additional config for the health check controller
+ HealthCheckConfig healthcheckconfig.HealthCheckConfig
+}
+
+// RegisteredExtension is a registered extension that the HealthCheck Controller watches.
+// The field extension contains the extension object.
+// The field healthConditionTypes contains all distinct healthCondition types (extracted from the health checks).
+// They are used as the .type field of the Condition that the HealthCheck controller writes to the extension resource.
+// The field groupVersionKind stores the GroupVersionKind of the extension resource.
+type RegisteredExtension struct {
+ extension extensionsv1alpha1.Object
+ getExtensionObjFunc GetExtensionObjectFunc
+ healthConditionTypes []string
+ groupVersionKind schema.GroupVersionKind
+}
+
+// DefaultRegistration configures the default health check actuator to execute the provided health checks and adds it to the provided controller-runtime manager.
+// The actuator reconciles a single extension of a specific type and writes one condition for each distinct healthConditionType.
+// extensionType (e.g. aws) defines the spec.type of the extension to watch.
+// kind defines the GroupVersionKind of the extension.
+// getExtensionObjListFunc returns a client.ObjectList representation of the extension resource to register.
+// getExtensionObjFunc returns a runtime.Object representation of the extension resource to register.
+// mgr is the controller-runtime manager.
+// opts contains the configuration for the health check controller.
+// customPredicates allow fine-grained control over which resources to watch.
+// healthChecks defines the checks to execute, mapped to the healthConditionType they contribute to (e.g. checkDeployment in the Seed -> ControlPlaneHealthy).
+func DefaultRegistration(extensionType string, kind schema.GroupVersionKind, getExtensionObjListFunc GetExtensionObjectListFunc, getExtensionObjFunc GetExtensionObjectFunc, mgr manager.Manager, opts DefaultAddArgs, customPredicates []predicate.Predicate, healthChecks []ConditionTypeToHealthCheck) error {
+ predicates := append(DefaultPredicates(), customPredicates...)
+
+ args := AddArgs{
+ ControllerOptions: opts.Controller,
+ Predicates: predicates,
+ Type: extensionType,
+ SyncPeriod: opts.HealthCheckConfig.SyncPeriod,
+ GetExtensionObjListFunc: getExtensionObjListFunc,
+ }
+
+ if err := args.RegisterExtension(getExtensionObjFunc, getHealthCheckTypes(healthChecks), kind); err != nil {
+ return err
+ }
+
+ healthCheckActuator := NewActuator(args.Type, args.GetExtensionGroupVersionKind().Kind, getExtensionObjFunc, healthChecks)
+ return Register(mgr, args, healthCheckActuator)
+}
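+
+// The following is an illustrative sketch (hypothetical function and values, not part of the
+// upstream file): a provider extension could register health checks for its Worker resources
+// via DefaultRegistration like this, passing in its own list function and checks.
+func exampleWorkerHealthCheckRegistration(
+	mgr manager.Manager,
+	opts DefaultAddArgs,
+	getWorkerListFunc GetExtensionObjectListFunc,
+	healthChecks []ConditionTypeToHealthCheck,
+) error {
+	return DefaultRegistration(
+		"aws", // hypothetical spec.type of the Worker resources to watch
+		extensionsv1alpha1.SchemeGroupVersion.WithKind(extensionsv1alpha1.WorkerResource),
+		getWorkerListFunc,
+		func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Worker{} },
+		mgr,
+		opts,
+		nil, // no custom predicates
+		healthChecks,
+	)
+}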
+
+// RegisterExtension registers a resource and its corresponding health check types.
+// It returns an error if the extension resource is not an extensionsv1alpha1.Object.
+// The controller writes the health check types as condition.type into the extension resource.
+// To contribute to the Shoot's health, Gardener checks each extension for a health condition type of SystemComponentsHealthy, EveryNodeReady or ControlPlaneHealthy.
+// However, extensions are free to choose any health condition type.
+func (a *AddArgs) RegisterExtension(getExtensionObjFunc GetExtensionObjectFunc, conditionTypes []string, kind schema.GroupVersionKind) error {
+ acc, err := extensions.Accessor(getExtensionObjFunc())
+ if err != nil {
+ return err
+ }
+
+ a.registeredExtension = &RegisteredExtension{
+ extension: acc,
+ healthConditionTypes: conditionTypes,
+ groupVersionKind: kind,
+ getExtensionObjFunc: getExtensionObjFunc,
+ }
+ return nil
+}
+
+// GetExtensionGroupVersionKind returns the GroupVersionKind of the registered extension resource.
+func (a *AddArgs) GetExtensionGroupVersionKind() schema.GroupVersionKind {
+ return a.registeredExtension.groupVersionKind
+}
+
+// DefaultPredicates returns the default predicates.
+func DefaultPredicates() []predicate.Predicate {
+ return []predicate.Predicate{
+ // watch: only requeue on spec change to prevent infinite loop
+ // health checks are being executed every 'sync period' anyways
+ predicate.GenerationChangedPredicate{},
+ }
+}
+
+// Register creates a new health check reconciler for the registered extension resource (which must be of type
+// extensionsv1alpha1.Object) and adds it to the given manager.
+func Register(mgr manager.Manager, args AddArgs, actuator HealthCheckActuator) error {
+ args.ControllerOptions.Reconciler = NewReconciler(mgr, actuator, *args.registeredExtension, args.SyncPeriod)
+ return add(mgr, args)
+}
+
+func add(mgr manager.Manager, args AddArgs) error {
+ // generate random string to create a unique controller name, in case multiple controllers register the same extension resource
+ str, err := utils.GenerateRandomString(10)
+ if err != nil {
+ return err
+ }
+
+ controllerName := fmt.Sprintf("%s-%s-%s-%s-%s", ControllerName, args.registeredExtension.groupVersionKind.Kind, args.registeredExtension.groupVersionKind.Group, args.registeredExtension.groupVersionKind.Version, str)
+ ctrl, err := controller.New(controllerName, mgr, args.ControllerOptions)
+ if err != nil {
+ return err
+ }
+
+ log.Log.Info("Registered health check controller", "kind", args.registeredExtension.groupVersionKind.Kind, "type", args.Type, "health check type", args.registeredExtension.healthConditionTypes, "sync period", args.SyncPeriod.Duration.String())
+
+ // add type predicate to only watch registered resource (e.g ControlPlane) with a certain type (e.g aws)
+ predicates := extensionspredicate.AddTypePredicate(args.Predicates, args.Type)
+
+ if err := ctrl.Watch(&source.Kind{Type: args.registeredExtension.getExtensionObjFunc()}, &handler.EnqueueRequestForObject{}, predicates...); err != nil {
+ return err
+ }
+
+ // watch Cluster of Shoot provider type (e.g aws)
+ // this is to be notified when the Shoot is being hibernated (stop health checks) and wakes up (start health checks again)
+ return ctrl.Watch(
+ &source.Kind{Type: &extensionsv1alpha1.Cluster{}},
+ extensionshandler.EnqueueRequestsFromMapper(extensionshandler.ClusterToObjectMapper(args.GetExtensionObjListFunc, predicates), extensionshandler.UpdateWithNew),
+ )
+}
+
+func getHealthCheckTypes(healthChecks []ConditionTypeToHealthCheck) []string {
+ types := sets.NewString()
+ for _, healthCheck := range healthChecks {
+ types.Insert(healthCheck.ConditionType)
+ }
+ return types.UnsortedList()
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/healtcheck_actuator.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/healtcheck_actuator.go
new file mode 100644
index 0000000..654833a
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/healtcheck_actuator.go
@@ -0,0 +1,284 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package healthcheck
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller"
+ "github.com/gardener/gardener/extensions/pkg/util"
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+
+ "github.com/go-logr/logr"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/rest"
+ "k8s.io/utils/pointer"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// Actuator contains all the health checks and the means to execute them
+type Actuator struct {
+ logger logr.Logger
+
+ restConfig *rest.Config
+ seedClient client.Client
+ scheme *runtime.Scheme
+ decoder runtime.Decoder
+
+ provider string
+ extensionKind string
+ getExtensionObjFunc GetExtensionObjectFunc
+ healthChecks []ConditionTypeToHealthCheck
+}
+
+// NewActuator creates a new Actuator.
+func NewActuator(provider, extensionKind string, getExtensionObjFunc GetExtensionObjectFunc, healthChecks []ConditionTypeToHealthCheck) HealthCheckActuator {
+ return &Actuator{
+ healthChecks: healthChecks,
+ getExtensionObjFunc: getExtensionObjFunc,
+ provider: provider,
+ extensionKind: extensionKind,
+ logger: log.Log.WithName(fmt.Sprintf("%s-%s-healthcheck-actuator", provider, extensionKind)),
+ }
+}
+
+// InjectScheme injects the given scheme into the actuator.
+func (a *Actuator) InjectScheme(scheme *runtime.Scheme) error {
+ a.scheme = scheme
+ a.decoder = serializer.NewCodecFactory(a.scheme).UniversalDecoder()
+ return nil
+}
+
+// InjectClient injects the given seed client into the actuator.
+func (a *Actuator) InjectClient(client client.Client) error {
+ a.seedClient = client
+ return nil
+}
+
+// InjectConfig injects the given REST config into the actuator.
+func (a *Actuator) InjectConfig(config *rest.Config) error {
+ a.restConfig = config
+ return nil
+}
+
+type healthCheckUnsuccessful struct {
+ reason string
+ detail string
+}
+
+type healthCheckProgressing struct {
+ reason string
+ detail string
+ threshold *time.Duration
+}
+
+type channelResult struct {
+ healthConditionType string
+ healthCheckResult *SingleCheckResult
+ error error
+}
+
+type checkResultForConditionType struct {
+ failedChecks []error
+ unsuccessfulChecks []healthCheckUnsuccessful
+ progressingChecks []healthCheckProgressing
+ successfulChecks int
+ codes []gardencorev1beta1.ErrorCode
+}
+
+// ExecuteHealthCheckFunctions executes all the health check functions, injects clients and loggers, and aggregates the results.
+// It returns one Result for each HealthConditionType (e.g. ControlPlaneHealthy).
+func (a *Actuator) ExecuteHealthCheckFunctions(ctx context.Context, request types.NamespacedName) (*[]Result, error) {
+ var (
+ shootClient client.Client
+ channel = make(chan channelResult)
+ wg sync.WaitGroup
+ )
+
+ wg.Add(len(a.healthChecks))
+ for _, hc := range a.healthChecks {
+ // clone to avoid problems during parallel execution
+ check := hc.HealthCheck.DeepCopy()
+ SeedClientInto(a.seedClient, check)
+ if _, ok := check.(ShootClient); ok {
+ if shootClient == nil {
+ var err error
+ _, shootClient, err = util.NewClientForShoot(ctx, a.seedClient, request.Namespace, client.Options{})
+ if err != nil {
+ msg := fmt.Errorf("failed to create shoot client in namespace '%s': %v", request.Namespace, err)
+ a.logger.Error(err, msg.Error())
+ return nil, msg
+ }
+ }
+ ShootClientInto(shootClient, check)
+ }
+
+ check.SetLoggerSuffix(a.provider, a.extensionKind)
+
+ go func(ctx context.Context, request types.NamespacedName, check HealthCheck, preCheckFunc PreCheckFunc, healthConditionType string) {
+ defer wg.Done()
+
+ if preCheckFunc != nil {
+ obj := a.getExtensionObjFunc()
+ if err := a.seedClient.Get(ctx, client.ObjectKey{Namespace: request.Namespace, Name: request.Name}, obj); err != nil {
+ channel <- channelResult{
+ healthCheckResult: &SingleCheckResult{
+ Status: gardencorev1beta1.ConditionFalse,
+ Detail: err.Error(),
+ Reason: "ReadExtensionObjectFailed",
+ },
+ error: err,
+ healthConditionType: healthConditionType,
+ }
+ return
+ }
+
+ cluster, err := extensionscontroller.GetCluster(ctx, a.seedClient, request.Namespace)
+ if err != nil {
+ channel <- channelResult{
+ healthCheckResult: &SingleCheckResult{
+ Status: gardencorev1beta1.ConditionFalse,
+ Detail: err.Error(),
+ Reason: "ReadClusterObjectFailed",
+ },
+ error: err,
+ healthConditionType: healthConditionType,
+ }
+ return
+ }
+
+ if !preCheckFunc(obj, cluster) {
+ a.logger.V(6).Info("Skipping health check as pre check function returned false", "condition type", healthConditionType)
+ channel <- channelResult{
+ healthCheckResult: &SingleCheckResult{
+ Status: gardencorev1beta1.ConditionTrue,
+ },
+ error: nil,
+ healthConditionType: healthConditionType,
+ }
+ return
+ }
+ }
+
+ healthCheckResult, err := check.Check(ctx, request)
+ channel <- channelResult{
+ healthCheckResult: healthCheckResult,
+ error: err,
+ healthConditionType: healthConditionType,
+ }
+ }(ctx, request, check, hc.PreCheckFunc, hc.ConditionType)
+ }
+
+ // close channel when wait group has 0 counter
+ go func() {
+ wg.Wait()
+ close(channel)
+ }()
+
+ groupedHealthCheckResults := make(map[string]*checkResultForConditionType)
+ // loop runs until channel is closed
+ for channelResult := range channel {
+ if groupedHealthCheckResults[channelResult.healthConditionType] == nil {
+ groupedHealthCheckResults[channelResult.healthConditionType] = &checkResultForConditionType{}
+ }
+ if channelResult.error != nil {
+ groupedHealthCheckResults[channelResult.healthConditionType].failedChecks = append(groupedHealthCheckResults[channelResult.healthConditionType].failedChecks, channelResult.error)
+ continue
+ }
+ if channelResult.healthCheckResult.Status == gardencorev1beta1.ConditionFalse {
+ groupedHealthCheckResults[channelResult.healthConditionType].unsuccessfulChecks = append(groupedHealthCheckResults[channelResult.healthConditionType].unsuccessfulChecks, healthCheckUnsuccessful{reason: channelResult.healthCheckResult.Reason, detail: channelResult.healthCheckResult.Detail})
+ groupedHealthCheckResults[channelResult.healthConditionType].codes = append(groupedHealthCheckResults[channelResult.healthConditionType].codes, channelResult.healthCheckResult.Codes...)
+ continue
+ }
+ if channelResult.healthCheckResult.Status == gardencorev1beta1.ConditionProgressing {
+ groupedHealthCheckResults[channelResult.healthConditionType].progressingChecks = append(groupedHealthCheckResults[channelResult.healthConditionType].progressingChecks, healthCheckProgressing{reason: channelResult.healthCheckResult.Reason, detail: channelResult.healthCheckResult.Detail, threshold: channelResult.healthCheckResult.ProgressingThreshold})
+ groupedHealthCheckResults[channelResult.healthConditionType].codes = append(groupedHealthCheckResults[channelResult.healthConditionType].codes, channelResult.healthCheckResult.Codes...)
+ continue
+ }
+ groupedHealthCheckResults[channelResult.healthConditionType].successfulChecks++
+ }
+
+ var checkResults []Result
+ for conditionType, result := range groupedHealthCheckResults {
+ if len(result.unsuccessfulChecks) > 0 || len(result.failedChecks) > 0 {
+ var details strings.Builder
+ if len(result.unsuccessfulChecks) > 0 {
+ details.WriteString("Unsuccessful checks: ")
+ }
+ for index, check := range result.unsuccessfulChecks {
+ details.WriteString(fmt.Sprintf("%d) %s: %s. ", index+1, check.reason, check.detail))
+ }
+ if len(result.progressingChecks) > 0 {
+ details.WriteString("Progressing checks: ")
+ }
+ for index, check := range result.progressingChecks {
+ details.WriteString(fmt.Sprintf("%d) %s: %s. ", index+1, check.reason, check.detail))
+ }
+ if len(result.failedChecks) > 0 {
+ details.WriteString("Failed checks: ")
+ }
+ for index, err := range result.failedChecks {
+ details.WriteString(fmt.Sprintf("%d) %s. ", index+1, err.Error()))
+ }
+ checkResults = append(checkResults, Result{
+ HealthConditionType: conditionType,
+ Status: gardencorev1beta1.ConditionFalse,
+ Detail: pointer.StringPtr(details.String()),
+ SuccessfulChecks: result.successfulChecks,
+ UnsuccessfulChecks: len(result.unsuccessfulChecks),
+ FailedChecks: len(result.failedChecks),
+ Codes: result.codes,
+ })
+ continue
+ }
+
+ if len(result.progressingChecks) > 0 {
+ var (
+ details strings.Builder
+ threshold *time.Duration
+ )
+
+ details.WriteString("Progressing checks: ")
+ for index, check := range result.progressingChecks {
+ details.WriteString(fmt.Sprintf("%d) %s: %s. ", index+1, check.reason, check.detail))
+ if check.threshold != nil && (threshold == nil || *threshold > *check.threshold) {
+ threshold = check.threshold
+ }
+ }
+ checkResults = append(checkResults, Result{
+ HealthConditionType: conditionType,
+ Status: gardencorev1beta1.ConditionProgressing,
+ ProgressingThreshold: threshold,
+ Detail: pointer.StringPtr(details.String()),
+ SuccessfulChecks: result.successfulChecks,
+ ProgressingChecks: len(result.progressingChecks),
+ Codes: result.codes,
+ })
+ continue
+ }
+
+ checkResults = append(checkResults, Result{
+ HealthConditionType: conditionType,
+ Status: gardencorev1beta1.ConditionTrue,
+ SuccessfulChecks: result.successfulChecks,
+ })
+ }
+
+ return &checkResults, nil
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/inject.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/inject.go
new file mode 100644
index 0000000..9b99a58
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/inject.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package healthcheck
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ShootClient is an interface to be used to receive a shoot client.
+type ShootClient interface {
+ // InjectShootClient injects the shoot client
+ InjectShootClient(client.Client)
+}
+
+// SeedClient is an interface to be used to receive a seed client.
+type SeedClient interface {
+ // InjectSeedClient injects the seed client
+ InjectSeedClient(client.Client)
+}
+
+// ShootClientInto will set the shoot client on i if i implements ShootClient.
+func ShootClientInto(client client.Client, i interface{}) bool {
+ if s, ok := i.(ShootClient); ok {
+ s.InjectShootClient(client)
+ return true
+ }
+ return false
+}
+
+// SeedClientInto will set the seed client on i if i implements SeedClient.
+func SeedClientInto(client client.Client, i interface{}) bool {
+ if s, ok := i.(SeedClient); ok {
+ s.InjectSeedClient(client)
+ return true
+ }
+ return false
+}
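+
+// The following is an illustrative sketch (hypothetical type, not part of the upstream file):
+// a health check that wants the seed client injected simply implements SeedClient; the actuator
+// then hands the client over via SeedClientInto before the check is executed.
+type exampleSeedCheck struct {
+	seedClient client.Client
+}
+
+// InjectSeedClient implements SeedClient.
+func (c *exampleSeedCheck) InjectSeedClient(seedClient client.Client) {
+	c.seedClient = seedClient
+}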
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/reconciler.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/reconciler.go
new file mode 100644
index 0000000..c83ff04
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/healthcheck/reconciler.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package healthcheck
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/go-logr/logr"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/client-go/util/retry"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/runtime/inject"
+
+ extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller"
+ "github.com/gardener/gardener/pkg/api/extensions"
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ gardenv1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ gardencorev1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+)
+
+type reconciler struct {
+ logger logr.Logger
+ actuator HealthCheckActuator
+ client client.Client
+ recorder record.EventRecorder
+ registeredExtension RegisteredExtension
+ syncPeriod metav1.Duration
+}
+
+const (
+ // ReasonUnsuccessful is the reason phrase for the health check condition if one or more of its tests failed.
+ ReasonUnsuccessful = "HealthCheckUnsuccessful"
+ // ReasonProgressing is the reason phrase for the health check condition if one or more of its tests are progressing.
+ ReasonProgressing = "HealthCheckProgressing"
+ // ReasonSuccessful is the reason phrase for the health check condition if all tests are successful.
+ ReasonSuccessful = "HealthCheckSuccessful"
+)
+
+// NewReconciler creates a new reconcile.Reconciler that reconciles
+// the registered extension resources (Gardener's `extensions.gardener.cloud` API group).
+func NewReconciler(mgr manager.Manager, actuator HealthCheckActuator, registeredExtension RegisteredExtension, syncPeriod metav1.Duration) reconcile.Reconciler {
+ return &reconciler{
+ logger: log.Log.WithName(ControllerName),
+ actuator: actuator,
+ recorder: mgr.GetEventRecorderFor(ControllerName),
+ registeredExtension: registeredExtension,
+ syncPeriod: syncPeriod,
+ }
+}
+
+func (r *reconciler) InjectFunc(f inject.Func) error {
+ return f(r.actuator)
+}
+
+func (r *reconciler) InjectClient(client client.Client) error {
+ r.client = client
+ return nil
+}
+
+func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+ extension := r.registeredExtension.getExtensionObjFunc()
+
+ if err := r.client.Get(ctx, request.NamespacedName, extension); err != nil {
+ if errors.IsNotFound(err) {
+ return r.resultWithRequeue(), nil
+ }
+ return reconcile.Result{}, err
+ }
+
+ acc, err := extensions.Accessor(extension.DeepCopyObject())
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+
+ if acc.GetDeletionTimestamp() != nil {
+ r.logger.V(6).Info("Do not perform HealthCheck for extension resource. Extension is being deleted.", "name", acc.GetName(), "namespace", acc.GetNamespace())
+ return reconcile.Result{}, nil
+ }
+
+ if isInMigration(acc) {
+ r.logger.Info("Do not perform HealthCheck for extension resource. Extension is being migrated.", "name", acc.GetName(), "namespace", acc.GetNamespace())
+ return reconcile.Result{}, nil
+ }
+
+ cluster, err := extensionscontroller.GetCluster(ctx, r.client, acc.GetNamespace())
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+
+ if extensionscontroller.IsHibernated(cluster) {
+ var conditions []condition
+ for _, healthConditionType := range r.registeredExtension.healthConditionTypes {
+ conditionBuilder, err := gardencorev1beta1helper.NewConditionBuilder(gardencorev1beta1.ConditionType(healthConditionType))
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+
+ conditions = append(conditions, extensionConditionHibernated(conditionBuilder, healthConditionType))
+ }
+ if err := r.updateExtensionConditions(ctx, extension, conditions...); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ r.logger.V(6).Info("Do not perform HealthCheck for extension resource. Shoot is hibernated.", "name", acc.GetName(), "namespace", acc.GetNamespace(), "kind", acc.GetObjectKind().GroupVersionKind().Kind)
+ return reconcile.Result{}, nil
+ }
+
+ r.logger.V(6).Info("Performing health check", "name", acc.GetName(), "namespace", acc.GetNamespace(), "kind", acc.GetObjectKind().GroupVersionKind().Kind)
+ return r.performHealthCheck(ctx, request, extension)
+}
+
+func (r *reconciler) performHealthCheck(ctx context.Context, request reconcile.Request, extension extensionsv1alpha1.Object) (reconcile.Result, error) {
+ healthCheckResults, err := r.actuator.ExecuteHealthCheckFunctions(ctx, types.NamespacedName{Namespace: request.Namespace, Name: request.Name})
+ if err != nil {
+ var conditions []condition
+ r.logger.Info("Failed to execute healthChecks. Updating each HealthCheckCondition for the extension resource to ConditionCheckError.", "kind", r.registeredExtension.groupVersionKind.Kind, "health condition types", r.registeredExtension.healthConditionTypes, "name", request.Name, "namespace", request.Namespace, "error", err.Error())
+ for _, healthConditionType := range r.registeredExtension.healthConditionTypes {
+ conditionBuilder, buildErr := gardencorev1beta1helper.NewConditionBuilder(gardencorev1beta1.ConditionType(healthConditionType))
+ if buildErr != nil {
+ return reconcile.Result{}, buildErr
+ }
+
+ conditions = append(conditions, extensionConditionFailedToExecute(conditionBuilder, healthConditionType, r.registeredExtension.groupVersionKind.Kind, err))
+ }
+ if updateErr := r.updateExtensionConditions(ctx, extension, conditions...); updateErr != nil {
+ return reconcile.Result{}, updateErr
+ }
+ return r.resultWithRequeue(), nil
+ }
+
+ conditions := make([]condition, 0, len(*healthCheckResults))
+ for _, healthCheckResult := range *healthCheckResults {
+ conditionBuilder, err := gardencorev1beta1helper.NewConditionBuilder(gardencorev1beta1.ConditionType(healthCheckResult.HealthConditionType))
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+
+ var logger logr.Logger
+ if healthCheckResult.Status == gardencorev1beta1.ConditionTrue || healthCheckResult.Status == gardencorev1beta1.ConditionProgressing {
+ logger = r.logger.V(6)
+ } else {
+ logger = r.logger
+ }
+
+ if healthCheckResult.Status == gardencorev1beta1.ConditionProgressing || healthCheckResult.Status == gardencorev1beta1.ConditionFalse {
+ if healthCheckResult.FailedChecks > 0 {
+ r.logger.Info("Updating HealthCheckCondition for extension resource to ConditionCheckError.", "kind", r.registeredExtension.groupVersionKind.Kind, "health condition type", healthCheckResult.HealthConditionType, "name", request.Name, "namespace", request.Namespace)
+ conditions = append(conditions, extensionConditionCheckError(conditionBuilder, healthCheckResult.HealthConditionType, r.registeredExtension.groupVersionKind.Kind, healthCheckResult))
+ continue
+ }
+
+ logger.Info("Health check for extension resource progressing or unsuccessful.", "kind", fmt.Sprintf("%s.%s.%s", r.registeredExtension.groupVersionKind.Kind, r.registeredExtension.groupVersionKind.Group, r.registeredExtension.groupVersionKind.Version), "name", request.Name, "namespace", request.Namespace, "failed", healthCheckResult.FailedChecks, "progressing", healthCheckResult.ProgressingChecks, "successful", healthCheckResult.SuccessfulChecks, "details", healthCheckResult.GetDetails())
+ conditions = append(conditions, extensionConditionUnsuccessful(conditionBuilder, healthCheckResult.HealthConditionType, extension, healthCheckResult))
+ continue
+ }
+
+ logger.Info("Health check for extension resource successful.", "kind", r.registeredExtension.groupVersionKind.Kind, "health condition type", healthCheckResult.HealthConditionType, "name", request.Name, "namespace", request.Namespace)
+ conditions = append(conditions, extensionConditionSuccessful(conditionBuilder, healthCheckResult.HealthConditionType, healthCheckResult))
+ }
+
+ if err := r.updateExtensionConditions(ctx, extension, conditions...); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ return r.resultWithRequeue(), nil
+}
+
+func extensionConditionFailedToExecute(conditionBuilder gardencorev1beta1helper.ConditionBuilder, healthConditionType string, kind string, executionError error) condition {
+ conditionBuilder.
+ WithStatus(gardencorev1beta1.ConditionUnknown).
+ WithReason(gardencorev1beta1.ConditionCheckError).
+ WithMessage(fmt.Sprintf("failed to execute health checks for '%s': %v", kind, executionError.Error()))
+ return condition{
+ builder: conditionBuilder,
+ healthConditionType: healthConditionType,
+ }
+}
+
+func extensionConditionCheckError(conditionBuilder gardencorev1beta1helper.ConditionBuilder, healthConditionType string, kind string, healthCheckResult Result) condition {
+ conditionBuilder.
+ WithStatus(gardencorev1beta1.ConditionUnknown).
+ WithReason(gardencorev1beta1.ConditionCheckError).
+ WithMessage(fmt.Sprintf("failed to execute %d/%d health checks for '%s': %v", healthCheckResult.FailedChecks, healthCheckResult.SuccessfulChecks+healthCheckResult.UnsuccessfulChecks+healthCheckResult.FailedChecks, kind, healthCheckResult.GetDetails()))
+ return condition{
+ builder: conditionBuilder,
+ healthConditionType: healthConditionType,
+ }
+}
+
+func extensionConditionUnsuccessful(conditionBuilder gardencorev1beta1helper.ConditionBuilder, healthConditionType string, extension extensionsv1alpha1.Object, healthCheckResult Result) condition {
+ var (
+ numberOfChecks = healthCheckResult.UnsuccessfulChecks + healthCheckResult.ProgressingChecks + healthCheckResult.SuccessfulChecks
+ detail = fmt.Sprintf("Health check summary: %d/%d unsuccessful, %d/%d progressing, %d/%d successful. %v", healthCheckResult.UnsuccessfulChecks, numberOfChecks, healthCheckResult.ProgressingChecks, numberOfChecks, healthCheckResult.SuccessfulChecks, numberOfChecks, healthCheckResult.GetDetails())
+ status = gardencorev1beta1.ConditionFalse
+ reason = ReasonUnsuccessful
+ )
+
+ if healthCheckResult.ProgressingChecks > 0 && healthCheckResult.ProgressingThreshold != nil {
+ if oldCondition := gardencorev1beta1helper.GetCondition(extension.GetExtensionStatus().GetConditions(), gardencorev1beta1.ConditionType(healthConditionType)); oldCondition == nil {
+ status = gardencorev1beta1.ConditionProgressing
+ reason = ReasonProgressing
+ } else if oldCondition.Status != gardencorev1beta1.ConditionFalse {
+ delta := time.Now().UTC().Sub(oldCondition.LastTransitionTime.Time.UTC())
+ if oldCondition.Status == gardencorev1beta1.ConditionTrue || delta <= *healthCheckResult.ProgressingThreshold {
+ status = gardencorev1beta1.ConditionProgressing
+ reason = ReasonProgressing
+ }
+ }
+ }
+
+ conditionBuilder.
+ WithStatus(status).
+ WithReason(reason).
+ WithCodes(healthCheckResult.Codes...).
+ WithMessage(detail)
+ return condition{
+ builder: conditionBuilder,
+ healthConditionType: healthConditionType,
+ }
+}
+
+func extensionConditionSuccessful(conditionBuilder gardencorev1beta1helper.ConditionBuilder, healthConditionType string, healthCheckResult Result) condition {
+ conditionBuilder.
+ WithStatus(gardencorev1beta1.ConditionTrue).
+ WithReason(ReasonSuccessful).
+ WithMessage(fmt.Sprintf("(%d/%d) Health checks successful", healthCheckResult.SuccessfulChecks, healthCheckResult.SuccessfulChecks))
+ return condition{
+ builder: conditionBuilder,
+ healthConditionType: healthConditionType,
+ }
+}
+
+func extensionConditionHibernated(conditionBuilder gardencorev1beta1helper.ConditionBuilder, healthConditionType string) condition {
+ conditionBuilder.
+ WithStatus(gardencorev1beta1.ConditionTrue).
+ WithReason(ReasonSuccessful).
+ WithMessage("Shoot is hibernated")
+ return condition{
+ builder: conditionBuilder,
+ healthConditionType: healthConditionType,
+ }
+}
+
+type condition struct {
+ builder gardencorev1beta1helper.ConditionBuilder
+ healthConditionType string
+}
+
+func (r *reconciler) updateExtensionConditions(ctx context.Context, extension extensionsv1alpha1.Object, conditions ...condition) error {
+ return extensionscontroller.TryPatchStatus(ctx, retry.DefaultBackoff, r.client, extension, func() error {
+ for _, cond := range conditions {
+ now := metav1.Now()
+ if c := gardencorev1beta1helper.GetCondition(extension.GetExtensionStatus().GetConditions(), gardencorev1beta1.ConditionType(cond.healthConditionType)); c != nil {
+ cond.builder.WithOldCondition(*c)
+ }
+ updatedCondition, _ := cond.builder.WithNowFunc(func() metav1.Time { return now }).Build()
+ // always update - the Gardenlet expects a recent health check
+ updatedCondition.LastUpdateTime = now
+ extension.GetExtensionStatus().SetConditions(gardencorev1beta1helper.MergeConditions(extension.GetExtensionStatus().GetConditions(), updatedCondition))
+ }
+ return nil
+ })
+}
+
+func (r *reconciler) resultWithRequeue() reconcile.Result {
+ return reconcile.Result{RequeueAfter: r.syncPeriod.Duration}
+}
+
+func isInMigration(accessor extensionsv1alpha1.Object) bool {
+ annotations := accessor.GetAnnotations()
+ if annotations != nil &&
+ annotations[gardenv1beta1constants.GardenerOperation] == gardenv1beta1constants.GardenerOperationMigrate {
+ return true
+ }
+
+ status := accessor.GetExtensionStatus()
+ if status == nil {
+ return false
+ }
+
+ lastOperation := status.GetLastOperation()
+ return lastOperation != nil && lastOperation.Type == gardencorev1beta1.LastOperationTypeMigrate
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/log.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/log.go
new file mode 100644
index 0000000..18a5590
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/log.go
@@ -0,0 +1,95 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+ "fmt"
+
+ "github.com/go-logr/logr"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/event"
+)
+
+// CreateEventLogger creates a Logger with keys and values from the given CreateEvent.
+func CreateEventLogger(log logr.Logger, event event.CreateEvent) logr.Logger {
+ return log.WithValues(CreateEventLogValues(event)...)
+}
+
+// UpdateEventLogger creates a Logger with keys and values from the given UpdateEvent.
+func UpdateEventLogger(log logr.Logger, event event.UpdateEvent) logr.Logger {
+ return log.WithValues(UpdateEventLogValues(event)...)
+}
+
+// DeleteEventLogger creates a Logger with keys and values from the given DeleteEvent.
+func DeleteEventLogger(log logr.Logger, event event.DeleteEvent) logr.Logger {
+ return log.WithValues(DeleteEventLogValues(event)...)
+}
+
+// GenericEventLogger creates a Logger with keys and values from the given GenericEvent.
+func GenericEventLogger(log logr.Logger, event event.GenericEvent) logr.Logger {
+ return log.WithValues(GenericEventLogValues(event)...)
+}
+
+// PrefixLogValues prefixes the keys of the given logValues with the given prefix.
+func PrefixLogValues(prefix string, logValues []interface{}) []interface{} {
+ if prefix == "" {
+ return logValues
+ }
+ if logValues == nil {
+ return logValues
+ }
+
+ out := make([]interface{}, 0, len(logValues))
+ for i := 0; i < len(logValues); i += 2 {
+ key := logValues[i]
+ value := logValues[i+1]
+ out = append(out, fmt.Sprintf("%s.%s", prefix, key), value)
+ }
+ return out
+}
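+
+// Example (editorial sketch, not part of the upstream code): prefixing keeps keys unique
+// when values for old and new objects are logged together:
+//
+//	PrefixLogValues("old", []interface{}{"meta.name", "foo", "meta.namespace", "bar"})
+//	// yields []interface{}{"old.meta.name", "foo", "old.meta.namespace", "bar"}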
+
+// CreateEventLogValues extracts the log values from the given CreateEvent.
+func CreateEventLogValues(event event.CreateEvent) []interface{} {
+ return ObjectLogValues(event.Object)
+}
+
+// DeleteEventLogValues extracts the log values from the given DeleteEvent.
+func DeleteEventLogValues(event event.DeleteEvent) []interface{} {
+ return append(ObjectLogValues(event.Object), "delete-state-unknown", event.DeleteStateUnknown)
+}
+
+// GenericEventLogValues extracts the log values from the given GenericEvent.
+func GenericEventLogValues(event event.GenericEvent) []interface{} {
+ return ObjectLogValues(event.Object)
+}
+
+// UpdateEventLogValues extracts the log values from the given UpdateEvent.
+func UpdateEventLogValues(event event.UpdateEvent) []interface{} {
+ var values []interface{}
+ values = append(values, PrefixLogValues("old", ObjectLogValues(event.ObjectOld))...)
+ values = append(values, PrefixLogValues("new", ObjectLogValues(event.ObjectNew))...)
+ return values
+}
+
+// ObjectLogValues extracts the log values from the given client.Object.
+func ObjectLogValues(obj client.Object) []interface{} {
+ values := []interface{}{"meta.name", obj.GetName()}
+ if namespace := obj.GetNamespace(); namespace != "" {
+ values = append(values, "meta.namespace", namespace)
+ }
+ apiVersion, kind := obj.GetObjectKind().GroupVersionKind().ToAPIVersionAndKind()
+ values = append(values, "object.apiVersion", apiVersion, "object.kind", kind)
+ return values
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/managedresources.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/managedresources.go
new file mode 100644
index 0000000..466b1d3
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/managedresources.go
@@ -0,0 +1,44 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+ "context"
+
+ "github.com/gardener/gardener/pkg/chartrenderer"
+ "github.com/gardener/gardener/pkg/utils/chart"
+ "github.com/gardener/gardener/pkg/utils/imagevector"
+ "github.com/gardener/gardener/pkg/utils/managedresources"
+
+ "github.com/pkg/errors"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// RenderChartAndCreateManagedResource renders a chart and creates a ManagedResource for the gardener-resource-manager
+// out of the results.
+func RenderChartAndCreateManagedResource(ctx context.Context, namespace string, name string, client client.Client, chartRenderer chartrenderer.Interface, chart chart.Interface, values map[string]interface{}, imageVector imagevector.ImageVector, chartNamespace string, version string, withNoCleanupLabel bool, forceOverwriteAnnotations bool) error {
+ chartName, data, err := chart.Render(chartRenderer, chartNamespace, imageVector, version, version, values)
+ if err != nil {
+ return errors.Wrapf(err, "could not render chart")
+ }
+
+ // Create or update managed resource referencing the previously created secret
+ var injectedLabels map[string]string
+ if withNoCleanupLabel {
+ injectedLabels = map[string]string{ShootNoCleanupLabel: "true"}
+ }
+
+ return managedresources.CreateManagedResource(ctx, client, namespace, name, "", chartName, data, false, injectedLabels, forceOverwriteAnnotations)
+}
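+
+// Example (editorial sketch, not part of the upstream code): a control plane actuator would
+// typically call this with its embedded chart definition; `cpChart` (a chart.Interface),
+// `values` and `imageVector` are placeholders for such inputs:
+//
+//	err := RenderChartAndCreateManagedResource(
+//		ctx, namespace, "extension-controlplane", seedClient, chartRenderer,
+//		cpChart, values, imageVector, metav1.NamespaceSystem,
+//		cluster.Shoot.Spec.Kubernetes.Version,
+//		true,  // withNoCleanupLabel
+//		false, // forceOverwriteAnnotations
+//	)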
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/reconciler.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/reconciler.go
new file mode 100644
index 0000000..f4b2743
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/reconciler.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+ "context"
+
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/runtime/inject"
+)
+
+type operationAnnotationWrapper struct {
+ reconcile.Reconciler
+ client client.Client
+ newObjFunc func() client.Object
+}
+
+// OperationAnnotationWrapper is a wrapper for a reconciler that
+// removes the Gardener operation annotation before `Reconcile` is called.
+//
+// This is useful in conjunction with the HasOperationAnnotationPredicate.
+func OperationAnnotationWrapper(newObjFunc func() client.Object, reconciler reconcile.Reconciler) reconcile.Reconciler {
+ return &operationAnnotationWrapper{
+ newObjFunc: newObjFunc,
+ Reconciler: reconciler,
+ }
+}
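+
+// Example (editorial sketch, not part of the upstream code): a typical wiring wraps an
+// extension reconciler so that the operation annotation is removed before reconciliation.
+// `newExtensionReconciler()` is a hypothetical constructor for the inner reconciler, and
+// the extensionsv1alpha1 import is assumed:
+//
+//	reconciler := OperationAnnotationWrapper(
+//		func() client.Object { return &extensionsv1alpha1.Extension{} },
+//		newExtensionReconciler(),
+//	)
+//	// controller-runtime injects the client when the reconciler is registered with a controller.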
+
+// InjectClient implements inject.Client.
+func (o *operationAnnotationWrapper) InjectClient(client client.Client) error {
+ o.client = client
+ return nil
+}
+
+// InjectFunc implements inject.Func.
+func (o *operationAnnotationWrapper) InjectFunc(f inject.Func) error {
+ return f(o.Reconciler)
+}
+
+// Reconcile removes the Gardener operation annotation if available and calls the inner `Reconcile`.
+func (o *operationAnnotationWrapper) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+ obj := o.newObjFunc()
+ if err := o.client.Get(ctx, request.NamespacedName, obj); client.IgnoreNotFound(err) != nil {
+ return reconcile.Result{}, err
+ }
+
+ annotations := obj.GetAnnotations()
+ if annotations[v1beta1constants.GardenerOperation] == v1beta1constants.GardenerOperationWaitForState {
+ return reconcile.Result{}, nil
+ }
+
+ if annotations[v1beta1constants.GardenerOperation] == v1beta1constants.GardenerOperationReconcile {
+ withOpAnnotation := obj.DeepCopyObject()
+ delete(annotations, v1beta1constants.GardenerOperation)
+ obj.SetAnnotations(annotations)
+ if err := o.client.Patch(ctx, obj, client.MergeFrom(withOpAnnotation)); err != nil {
+ return reconcile.Result{}, err
+ }
+ }
+ return o.Reconciler.Reconcile(ctx, request)
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/shoot.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/shoot.go
new file mode 100644
index 0000000..49ed0a8
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/shoot.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "github.com/gardener/gardener/pkg/chartrenderer"
+)
+
+const (
+ // ShootNoCleanupLabel is a constant for a label on a resource indicating that the Gardener cleaner should not delete this
+ // resource when cleaning a shoot during the deletion flow.
+ ShootNoCleanupLabel = "shoot.gardener.cloud/no-cleanup"
+)
+
+// ChartRendererFactory creates a chartrenderer.Interface to be used by an actuator.
+type ChartRendererFactory interface {
+ // NewChartRendererForShoot creates a new chartrenderer.Interface for the shoot cluster.
+ NewChartRendererForShoot(string) (chartrenderer.Interface, error)
+}
+
+// ChartRendererFactoryFunc is a function that satisfies ChartRendererFactory.
+type ChartRendererFactoryFunc func(string) (chartrenderer.Interface, error)
+
+// NewChartRendererForShoot creates a new chartrenderer.Interface for the shoot cluster.
+func (f ChartRendererFactoryFunc) NewChartRendererForShoot(version string) (chartrenderer.Interface, error) {
+ return f(version)
+}
+
+// GetPodNetwork returns the pod network CIDR of the given Shoot.
+func GetPodNetwork(cluster *Cluster) string {
+ if cluster.Shoot.Spec.Networking.Pods != nil {
+ return *cluster.Shoot.Spec.Networking.Pods
+ }
+ return ""
+}
+
+// GetServiceNetwork returns the service network CIDR of the given Shoot.
+func GetServiceNetwork(cluster *Cluster) string {
+ if cluster.Shoot.Spec.Networking.Services != nil {
+ return *cluster.Shoot.Spec.Networking.Services
+ }
+ return ""
+}
+
+// IsHibernated returns true if the shoot is hibernated, or false otherwise.
+func IsHibernated(cluster *Cluster) bool {
+ return cluster.Shoot.Spec.Hibernation != nil && cluster.Shoot.Spec.Hibernation.Enabled != nil && *cluster.Shoot.Spec.Hibernation.Enabled
+}
+
+// IsFailed returns true if the embedded shoot is failed, or false otherwise.
+func IsFailed(cluster *Cluster) bool {
+ return IsShootFailed(cluster.Shoot)
+}
+
+// IsShootFailed returns true if the shoot is failed, or false otherwise.
+func IsShootFailed(shoot *gardencorev1beta1.Shoot) bool {
+ if shoot == nil {
+ return false
+ }
+ lastOperation := shoot.Status.LastOperation
+ return lastOperation != nil && lastOperation.State == gardencorev1beta1.LastOperationStateFailed
+}
+
+// IsUnmanagedDNSProvider returns true if the shoot uses an unmanaged DNS provider.
+func IsUnmanagedDNSProvider(cluster *Cluster) bool {
+ dns := cluster.Shoot.Spec.DNS
+ return dns == nil || (dns.Domain == nil && len(dns.Providers) > 0 && dns.Providers[0].Type != nil && *dns.Providers[0].Type == "unmanaged")
+}
+
+// GetReplicas returns the woken-up replica count for the given Shoot, or 0 if it is hibernated.
+func GetReplicas(cluster *Cluster, wokenUp int) int {
+ if IsHibernated(cluster) {
+ return 0
+ }
+ return wokenUp
+}
+
+// GetControlPlaneReplicas returns the woken-up replica count for control plane components of the given Shoot
+// that should only be scaled down at the end of the flow.
+func GetControlPlaneReplicas(cluster *Cluster, scaledDown bool, wokenUp int) int {
+ if cluster.Shoot != nil && cluster.Shoot.DeletionTimestamp == nil && IsHibernated(cluster) && scaledDown {
+ return 0
+ }
+ return wokenUp
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/status.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/status.go
new file mode 100644
index 0000000..5361fb5
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/status.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// LastOperation creates a new LastOperation from the given parameters.
+func LastOperation(t gardencorev1beta1.LastOperationType, state gardencorev1beta1.LastOperationState, progress int32, description string) *gardencorev1beta1.LastOperation {
+ return &gardencorev1beta1.LastOperation{
+ LastUpdateTime: metav1.Now(),
+ Type: t,
+ State: state,
+ Description: description,
+ Progress: progress,
+ }
+}
+
+// LastError creates a new LastError from the given parameters.
+func LastError(description string, codes ...gardencorev1beta1.ErrorCode) *gardencorev1beta1.LastError {
+ now := metav1.Now()
+
+ return &gardencorev1beta1.LastError{
+ Description: description,
+ Codes: codes,
+ LastUpdateTime: &now,
+ }
+}
+
+// ReconcileSucceeded returns a LastOperation with state succeeded at 100 percent and a nil LastError.
+func ReconcileSucceeded(t gardencorev1beta1.LastOperationType, description string) (*gardencorev1beta1.LastOperation, *gardencorev1beta1.LastError) {
+ return LastOperation(t, gardencorev1beta1.LastOperationStateSucceeded, 100, description), nil
+}
+
+// ReconcileError returns a LastOperation with state error and a LastError with the given description and codes.
+func ReconcileError(t gardencorev1beta1.LastOperationType, description string, progress int32, codes ...gardencorev1beta1.ErrorCode) (*gardencorev1beta1.LastOperation, *gardencorev1beta1.LastError) {
+ return LastOperation(t, gardencorev1beta1.LastOperationStateError, progress, description), LastError(description, codes...)
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/utils.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/utils.go
new file mode 100644
index 0000000..36b00a5
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/utils.go
@@ -0,0 +1,257 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+
+ controllererror "github.com/gardener/gardener/extensions/pkg/controller/error"
+ "github.com/gardener/gardener/pkg/api/extensions"
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+ "github.com/gardener/gardener/pkg/controllerutils"
+ kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
+
+ resourcemanagerv1alpha1 "github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1"
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/util/sets"
+ autoscalingv1beta2 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/util/retry"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+var (
+ localSchemeBuilder = runtime.NewSchemeBuilder(
+ scheme.AddToScheme,
+ extensionsv1alpha1.AddToScheme,
+ resourcemanagerv1alpha1.AddToScheme,
+ )
+
+ // AddToScheme adds the Kubernetes and extension scheme to the given scheme.
+ AddToScheme = localSchemeBuilder.AddToScheme
+
+ // ExtensionsScheme is the default scheme for extensions, consisting of all Kubernetes built-in
+ // schemes (client-go/kubernetes/scheme) and the extensions/v1alpha1 scheme.
+ ExtensionsScheme = runtime.NewScheme()
+)
+
+func init() {
+ utilruntime.Must(AddToScheme(ExtensionsScheme))
+}
+
+// ReconcileErr returns a reconcile.Result or an error, depending on whether the error is a
+// RequeueAfterError or not.
+func ReconcileErr(err error) (reconcile.Result, error) {
+ if requeueAfter, ok := err.(*controllererror.RequeueAfterError); ok {
+ return reconcile.Result{Requeue: true, RequeueAfter: requeueAfter.RequeueAfter}, nil
+ }
+ return reconcile.Result{}, err
+}
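+
+// Example (editorial sketch, not part of the upstream code): inside a Reconcile implementation,
+// an actuator error can be converted into a requeue-aware result. `a.Reconcile` is a hypothetical
+// actuator call:
+//
+//	if err := a.Reconcile(ctx, extensionObj); err != nil {
+//		// Requeues after the duration carried by a RequeueAfterError, otherwise returns the error.
+//		return ReconcileErr(err)
+//	}
+//	return reconcile.Result{}, nil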
+
+// ReconcileErrCause returns the cause in case the error is a RequeueAfterError. Otherwise,
+// it returns the input error.
+func ReconcileErrCause(err error) error {
+ if requeueAfter, ok := err.(*controllererror.RequeueAfterError); ok {
+ return requeueAfter.Cause
+ }
+ return err
+}
+
+// ReconcileErrCauseOrErr returns the cause of the error or the error if the cause is nil.
+func ReconcileErrCauseOrErr(err error) error {
+ if cause := ReconcileErrCause(err); cause != nil {
+ return cause
+ }
+ return err
+}
+
+// AddToManagerBuilder aggregates various AddToManager functions.
+type AddToManagerBuilder []func(manager.Manager) error
+
+// NewAddToManagerBuilder creates a new AddToManagerBuilder and registers the given functions.
+func NewAddToManagerBuilder(funcs ...func(manager.Manager) error) AddToManagerBuilder {
+ var builder AddToManagerBuilder
+ builder.Register(funcs...)
+ return builder
+}
+
+// Register registers the given functions in this builder.
+func (a *AddToManagerBuilder) Register(funcs ...func(manager.Manager) error) {
+ *a = append(*a, funcs...)
+}
+
+// AddToManager traverses over all AddToManager-functions of this builder, sequentially applying
+// them. It exits on the first error and returns it.
+func (a *AddToManagerBuilder) AddToManager(m manager.Manager) error {
+ for _, f := range *a {
+ if err := f(m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
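+
+// Example (editorial sketch, not part of the upstream code): extension binaries commonly aggregate
+// their AddToManager functions; `healthcheck.AddToManager` and `extensionctrl.AddToManager` are
+// placeholders for such functions:
+//
+//	addToManager := NewAddToManagerBuilder(
+//		healthcheck.AddToManager,
+//		extensionctrl.AddToManager,
+//	)
+//	if err := addToManager.AddToManager(mgr); err != nil {
+//		// handle error
+//	}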
+
+func finalizersAndAccessorOf(obj runtime.Object) (sets.String, metav1.Object, error) {
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return sets.NewString(accessor.GetFinalizers()...), accessor, nil
+}
+
+// HasFinalizer checks if the given object has a finalizer with the given name.
+func HasFinalizer(obj runtime.Object, finalizerName string) (bool, error) {
+ finalizers, _, err := finalizersAndAccessorOf(obj)
+ if err != nil {
+ return false, err
+ }
+
+ return finalizers.Has(finalizerName), nil
+}
+
+// EnsureFinalizer ensures that a finalizer of the given name is set on the given object.
+// If the finalizer is not set, it adds it to the list of finalizers and updates the remote object.
+var EnsureFinalizer = controllerutils.EnsureFinalizer
+
+// DeleteFinalizer ensures that the given finalizer is not present anymore in the given object.
+// If it is set, it removes it and issues an update.
+var DeleteFinalizer = controllerutils.RemoveFinalizer
+
+// DeleteAllFinalizers removes all finalizers from the object and issues an update.
+func DeleteAllFinalizers(ctx context.Context, client client.Client, obj client.Object) error {
+ return TryUpdate(ctx, retry.DefaultBackoff, client, obj, func() error {
+ obj.SetFinalizers(nil)
+ return nil
+ })
+}
+
+// SecretReferenceToKey returns the key of the given SecretReference.
+func SecretReferenceToKey(ref *corev1.SecretReference) client.ObjectKey {
+ return kutil.Key(ref.Namespace, ref.Name)
+}
+
+// GetSecretByReference returns the Secret object matching the given SecretReference.
+func GetSecretByReference(ctx context.Context, c client.Client, ref *corev1.SecretReference) (*corev1.Secret, error) {
+ secret := &corev1.Secret{}
+ if err := c.Get(ctx, SecretReferenceToKey(ref), secret); err != nil {
+ return nil, err
+ }
+ return secret, nil
+}
+
+// TryPatch tries to apply the given transformation function onto the given object, and to patch it afterwards with optimistic locking.
+// It retries the patch with an exponential backoff.
+var TryPatch = kutil.TryPatch
+
+// TryPatchStatus tries to apply the given transformation function onto the given object, and to patch its
+// status afterwards with optimistic locking. It retries the status patch with an exponential backoff.
+var TryPatchStatus = kutil.TryPatchStatus
+
+// TryUpdate tries to apply the given transformation function onto the given object, and to update it afterwards.
+// It retries the update with an exponential backoff.
+var TryUpdate = kutil.TryUpdate
+
+// TryUpdateStatus tries to apply the given transformation function onto the given object, and to update its
+// status afterwards. It retries the status update with an exponential backoff.
+var TryUpdateStatus = kutil.TryUpdateStatus
+
+// WatchBuilder holds various functions which add watch controls to the passed Controller.
+type WatchBuilder []func(controller.Controller) error
+
+// NewWatchBuilder creates a new WatchBuilder and registers the given functions.
+func NewWatchBuilder(funcs ...func(controller.Controller) error) WatchBuilder {
+ var builder WatchBuilder
+ builder.Register(funcs...)
+ return builder
+}
+
+// Register adds a function which add watch controls to the passed Controller to the WatchBuilder.
+func (w *WatchBuilder) Register(funcs ...func(controller.Controller) error) {
+ *w = append(*w, funcs...)
+}
+
+// AddToController adds the registered watches to the passed controller.
+func (w *WatchBuilder) AddToController(ctrl controller.Controller) error {
+ for _, f := range *w {
+ if err := f(ctrl); err != nil {
+ return err
+ }
+ }
+ return nil
+}
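+
+// Example (editorial sketch, not part of the upstream code): collecting extra watches and applying
+// them once the controller exists; `watchClusters` is a hypothetical func(controller.Controller) error:
+//
+//	watches := NewWatchBuilder(watchClusters)
+//	if err := watches.AddToController(ctrl); err != nil {
+//		// handle error
+//	}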
+
+// UnsafeGuessKind makes an unsafe guess about the kind of the given object.
+//
+// The argument to this method _has_ to be a pointer, otherwise it panics.
+func UnsafeGuessKind(obj runtime.Object) string {
+ t := reflect.TypeOf(obj)
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("kind of obj %T is not pointer", obj))
+ }
+
+ return t.Elem().Name()
+}
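+
+// Example (editorial sketch, not part of the upstream code): for a pointer to a typed object the
+// guess is simply the Go type name, while passing a non-pointer value panics:
+//
+//	kind := UnsafeGuessKind(&extensionsv1alpha1.Extension{}) // "Extension"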
+
+// GetVerticalPodAutoscalerObject returns an unstructured.Unstructured object representing autoscalingv1beta2.VerticalPodAutoscaler.
+func GetVerticalPodAutoscalerObject() *unstructured.Unstructured {
+ obj := &unstructured.Unstructured{}
+ obj.SetAPIVersion(autoscalingv1beta2.SchemeGroupVersion.String())
+ obj.SetKind("VerticalPodAutoscaler")
+ return obj
+}
+
+// RemoveAnnotation removes the given annotation key from the object and issues a patch.
+func RemoveAnnotation(ctx context.Context, c client.Client, obj client.Object, annotation string) error {
+ withAnnotation := obj.DeepCopyObject()
+
+ annotations := obj.GetAnnotations()
+ delete(annotations, annotation)
+ obj.SetAnnotations(annotations)
+
+ return c.Patch(ctx, obj, client.MergeFrom(withAnnotation))
+}
+
+// IsMigrated checks if an extension object has been successfully migrated.
+func IsMigrated(obj runtime.Object) bool {
+ acc, err := extensions.Accessor(obj)
+ if err != nil {
+ return false
+ }
+
+ lastOp := acc.GetExtensionStatus().GetLastOperation()
+ return lastOp != nil &&
+ lastOp.Type == gardencorev1beta1.LastOperationTypeMigrate &&
+ lastOp.State == gardencorev1beta1.LastOperationStateSucceeded
+}
+
+// GetObjectByReference gets an object by the given reference, in the given namespace.
+// If the object kind doesn't match the given reference kind this will result in an error.
+func GetObjectByReference(ctx context.Context, c client.Client, ref *autoscalingv1.CrossVersionObjectReference, namespace string, obj client.Object) error {
+ return c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: v1beta1constants.ReferencedResourcesPrefix + ref.Name}, obj)
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/handler/enqueue_mapped.go b/vendor/github.com/gardener/gardener/extensions/pkg/handler/enqueue_mapped.go
new file mode 100644
index 0000000..1572494
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/handler/enqueue_mapped.go
@@ -0,0 +1,116 @@
+// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package handler
+
+import (
+ "k8s.io/client-go/util/workqueue"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/event"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/runtime/inject"
+)
+
+// Mapper maps an object to a collection of keys to be enqueued
+type Mapper interface {
+ // Map maps an object
+ Map(obj client.Object) []reconcile.Request
+}
+
+var _ Mapper = MapFunc(nil)
+
+// MapFunc is the signature required for enqueueing requests from a generic function.
+// This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler.
+type MapFunc func(client.Object) []reconcile.Request
+
+// Map implements Mapper.
+func (f MapFunc) Map(obj client.Object) []reconcile.Request {
+ return f(obj)
+}
+
+// EnqueueRequestsFromMapper is similar to controller-runtime's handler.EnqueueRequestsFromMapFunc.
+// Instead of taking only a MapFunc, it also allows passing a Mapper interface. It also allows customizing the
+// behavior on UpdateEvents: the given UpdateBehavior decides whether only the old, only the new, or both
+// objects should be mapped and enqueued.
+func EnqueueRequestsFromMapper(m Mapper, updateBehavior UpdateBehavior) handler.EventHandler {
+ return &enqueueRequestsFromMapFunc{
+ mapper: m,
+ updateBehavior: updateBehavior,
+ }
+}
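+
+// Example (editorial sketch, not part of the upstream code): registering the handler on a controller
+// watch; `clusterMapper` is a hypothetical Mapper implementation, `ctrl` a controller.Controller, and
+// the controller-runtime source package is assumed to be imported:
+//
+//	err := ctrl.Watch(
+//		&source.Kind{Type: &extensionsv1alpha1.Cluster{}},
+//		EnqueueRequestsFromMapper(clusterMapper, UpdateWithNew),
+//	)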
+
+type enqueueRequestsFromMapFunc struct {
+ // mapper transforms the argument into a slice of keys to be reconciled
+ mapper Mapper
+ // updateBehavior decides which object(s) to map and enqueue on updates
+ updateBehavior UpdateBehavior
+}
+
+// Create implements EventHandler
+func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) {
+ e.mapAndEnqueue(q, evt.Object)
+}
+
+// Update implements EventHandler
+func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
+ switch e.updateBehavior {
+ case UpdateWithOldAndNew:
+ e.mapAndEnqueue(q, evt.ObjectOld)
+ e.mapAndEnqueue(q, evt.ObjectNew)
+ case UpdateWithOld:
+ e.mapAndEnqueue(q, evt.ObjectOld)
+ case UpdateWithNew:
+ e.mapAndEnqueue(q, evt.ObjectNew)
+ }
+}
+
+// Delete implements EventHandler
+func (e *enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
+ e.mapAndEnqueue(q, evt.Object)
+}
+
+// Generic implements EventHandler
+func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) {
+ e.mapAndEnqueue(q, evt.Object)
+}
+
+func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object client.Object) {
+ for _, req := range e.mapper.Map(object) {
+ q.Add(req)
+ }
+}
+
+// EnqueueRequestsFromMapper can inject fields into the mapper.
+
+// InjectFunc implements inject.Injector.
+func (e *enqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error {
+ if f == nil {
+ return nil
+ }
+ return f(e.mapper)
+}
+
+// UpdateBehavior determines how an update should be handled.
+type UpdateBehavior uint8
+
+const (
+ // UpdateWithOldAndNew considers both, the old as well as the new object, in case of an update.
+ UpdateWithOldAndNew UpdateBehavior = iota
+ // UpdateWithOld considers only the old object in case of an update.
+ UpdateWithOld
+ // UpdateWithNew considers only the new object in case of an update.
+ UpdateWithNew
+)
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/handler/mapper.go b/vendor/github.com/gardener/gardener/extensions/pkg/handler/mapper.go
new file mode 100644
index 0000000..2e80a84
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/handler/mapper.go
@@ -0,0 +1,94 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package handler
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/runtime/inject"
+
+ extensionspredicate "github.com/gardener/gardener/extensions/pkg/predicate"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+ contextutil "github.com/gardener/gardener/pkg/utils/context"
+)
+
+type clusterToObjectMapper struct {
+ ctx context.Context
+ client client.Client
+ newObjListFunc func() client.ObjectList
+ predicates []predicate.Predicate
+}
+
+func (m *clusterToObjectMapper) InjectClient(c client.Client) error {
+ m.client = c
+ return nil
+}
+
+func (m *clusterToObjectMapper) InjectStopChannel(stopCh <-chan struct{}) error {
+ m.ctx = contextutil.FromStopChannel(stopCh)
+ return nil
+}
+
+func (m *clusterToObjectMapper) InjectFunc(f inject.Func) error {
+ for _, p := range m.predicates {
+ if err := f(p); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (m *clusterToObjectMapper) Map(obj client.Object) []reconcile.Request {
+ cluster, ok := obj.(*extensionsv1alpha1.Cluster)
+ if !ok {
+ return nil
+ }
+
+ objList := m.newObjListFunc()
+ if err := m.client.List(m.ctx, objList, client.InNamespace(cluster.Name)); err != nil {
+ return nil
+ }
+
+ var requests []reconcile.Request
+
+ utilruntime.HandleError(meta.EachListItem(objList, func(obj runtime.Object) error {
+ o := obj.(client.Object)
+ if !extensionspredicate.EvalGeneric(o, m.predicates...) {
+ return nil
+ }
+
+ requests = append(requests, reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Namespace: o.GetNamespace(),
+ Name: o.GetName(),
+ },
+ })
+ return nil
+ }))
+ return requests
+}
+
+// ClusterToObjectMapper returns a mapper that returns requests for objects whose
+// referenced clusters have been modified.
+func ClusterToObjectMapper(newObjListFunc func() client.ObjectList, predicates []predicate.Predicate) Mapper {
+ return &clusterToObjectMapper{newObjListFunc: newObjListFunc, predicates: predicates}
+}
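+
+// Example (editorial sketch, not part of the upstream code): combining the mapper with
+// EnqueueRequestsFromMapper so that changes to a Cluster re-enqueue all Extension objects
+// in the Cluster's namespace:
+//
+//	mapper := ClusterToObjectMapper(
+//		func() client.ObjectList { return &extensionsv1alpha1.ExtensionList{} },
+//		nil, // no additional predicates
+//	)
+//	eventHandler := EnqueueRequestsFromMapper(mapper, UpdateWithNew)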
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/inject/inject.go b/vendor/github.com/gardener/gardener/extensions/pkg/inject/inject.go
new file mode 100644
index 0000000..a8949d3
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/inject/inject.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package inject
+
+import (
+ "context"
+
+ contextutil "github.com/gardener/gardener/pkg/utils/context"
+
+ "sigs.k8s.io/controller-runtime/pkg/cache"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// WithClient contains an instance of `client.Client`.
+type WithClient struct {
+ Client client.Client
+}
+
+// InjectClient implements `inject.InjectClient`.
+func (w *WithClient) InjectClient(c client.Client) error {
+ w.Client = c
+ return nil
+}
+
+// WithStopChannel contains a stop channel.
+type WithStopChannel struct {
+ StopChannel <-chan struct{}
+}
+
+// InjectStopChannel implements `inject.InjectStopChannel`.
+func (w *WithStopChannel) InjectStopChannel(stopChan <-chan struct{}) error {
+ w.StopChannel = stopChan
+ return nil
+}
+
+// WithContext contains a `context.Context`.
+type WithContext struct {
+ Context context.Context
+}
+
+// InjectStopChannel implements `inject.InjectStopChannel`.
+func (w *WithContext) InjectStopChannel(stopChan <-chan struct{}) error {
+ w.Context = contextutil.FromStopChannel(stopChan)
+ return nil
+}
+
+// WithCache contains an instance of `cache.Cache`.
+type WithCache struct {
+ Cache cache.Cache
+}
+
+// InjectCache implements `inject.InjectCache`.
+func (w *WithCache) InjectCache(cache cache.Cache) error {
+ w.Cache = cache
+ return nil
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/log/log.go b/vendor/github.com/gardener/gardener/extensions/pkg/log/log.go
new file mode 100644
index 0000000..6913610
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/log/log.go
@@ -0,0 +1,42 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "github.com/go-logr/logr"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ logzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+// ZapLogger is a Logger implementation.
+// If development is true, a Zap development config will be used
+// (stacktraces on warnings, no sampling), otherwise a Zap production
+// config will be used (stacktraces on errors, sampling).
+// Additionally, the time encoding is adjusted to `zapcore.ISO8601TimeEncoder`.
+func ZapLogger(development bool) logr.Logger {
+ return logzap.New(func(o *logzap.Options) {
+ var encCfg zapcore.EncoderConfig
+ if development {
+ encCfg = zap.NewDevelopmentEncoderConfig()
+ } else {
+ encCfg = zap.NewProductionEncoderConfig()
+ }
+ encCfg.EncodeTime = zapcore.ISO8601TimeEncoder
+
+ o.Encoder = zapcore.NewJSONEncoder(encCfg)
+ o.Development = development
+ })
+}
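+
+// Example (editorial sketch, not part of the upstream code): setting the controller-runtime
+// logger at startup:
+//
+//	import (
+//		extensionslog "github.com/gardener/gardener/extensions/pkg/log"
+//		runtimelog "sigs.k8s.io/controller-runtime/pkg/log"
+//	)
+//
+//	runtimelog.SetLogger(extensionslog.ZapLogger(false))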
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/predicate/mapper.go b/vendor/github.com/gardener/gardener/extensions/pkg/predicate/mapper.go
new file mode 100644
index 0000000..a24c79a
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/predicate/mapper.go
@@ -0,0 +1,105 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package predicate
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/event"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+ "sigs.k8s.io/controller-runtime/pkg/runtime/inject"
+)
+
+// MapperTrigger is a trigger a Mapper can react upon.
+type MapperTrigger uint8
+
+const (
+ // CreateTrigger is a MapperTrigger for create events.
+ CreateTrigger MapperTrigger = iota
+ // UpdateOldTrigger is a MapperTrigger for update events with the old meta and object.
+ UpdateOldTrigger
+ // UpdateNewTrigger is a MapperTrigger for update events with the new meta and object.
+ UpdateNewTrigger
+ // DeleteTrigger is a MapperTrigger for delete events.
+ DeleteTrigger
+ // GenericTrigger is a MapperTrigger for generic events.
+ GenericTrigger
+)
+
+// Mapper maps any event (in the form of a GenericEvent) to a boolean that indicates whether the
+// event shall be propagated or not.
+type Mapper interface {
+ Map(event event.GenericEvent) bool
+}
+
+// MapperFunc is a function that implements Mapper.
+type MapperFunc func(event.GenericEvent) bool
+
+// Map implements Mapper.
+func (f MapperFunc) Map(event event.GenericEvent) bool {
+ return f(event)
+}
+
+type mapperWithTriggers struct {
+ triggers map[MapperTrigger]struct{}
+ mapper Mapper
+}
+
+// FromMapper creates a new predicate from the given Mapper that reacts to the given MapperTriggers.
+func FromMapper(mapper Mapper, triggers ...MapperTrigger) predicate.Predicate {
+ t := make(map[MapperTrigger]struct{})
+ for _, trigger := range triggers {
+ t[trigger] = struct{}{}
+ }
+ return &mapperWithTriggers{t, mapper}
+}
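+
+// Example (editorial sketch, not part of the upstream code): building a predicate from an ad-hoc
+// MapperFunc that only reacts to create and generic events; the label key is hypothetical:
+//
+//	pred := FromMapper(MapperFunc(func(e event.GenericEvent) bool {
+//		return e.Object.GetLabels()["example.gardener.cloud/enabled"] == "true"
+//	}), CreateTrigger, GenericTrigger)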
+
+// InjectFunc implements Injector.
+func (m *mapperWithTriggers) InjectFunc(f inject.Func) error {
+ return f(m.mapper)
+}
+
+// Create implements Predicate.
+func (m *mapperWithTriggers) Create(e event.CreateEvent) bool {
+ if _, ok := m.triggers[CreateTrigger]; ok {
+ return m.mapper.Map(event.GenericEvent(e))
+ }
+ return true
+}
+
+// Delete implements Predicate.
+func (m *mapperWithTriggers) Delete(e event.DeleteEvent) bool {
+ if _, ok := m.triggers[DeleteTrigger]; ok {
+ return m.mapper.Map(event.GenericEvent{Object: e.Object})
+ }
+ return true
+}
+
+// Update implements Predicate.
+func (m *mapperWithTriggers) Update(e event.UpdateEvent) bool {
+ if _, ok := m.triggers[UpdateOldTrigger]; ok {
+ return m.mapper.Map(event.GenericEvent{Object: e.ObjectOld})
+ }
+ if _, ok := m.triggers[UpdateNewTrigger]; ok {
+ return m.mapper.Map(event.GenericEvent{Object: e.ObjectNew})
+ }
+ return true
+}
+
+// Generic implements Predicate.
+func (m *mapperWithTriggers) Generic(e event.GenericEvent) bool {
+ if _, ok := m.triggers[GenericTrigger]; ok {
+ return m.mapper.Map(event.GenericEvent{Object: e.Object})
+ }
+ return true
+}
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/predicate/predicate.go b/vendor/github.com/gardener/gardener/extensions/pkg/predicate/predicate.go
new file mode 100644
index 0000000..a43f43f
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/predicate/predicate.go
@@ -0,0 +1,298 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package predicate
+
+import (
+ "errors"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller"
+ extensionsinject "github.com/gardener/gardener/extensions/pkg/inject"
+ gardencore "github.com/gardener/gardener/pkg/api/core"
+ "github.com/gardener/gardener/pkg/api/extensions"
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+ "github.com/gardener/gardener/pkg/utils/version"
+
+ "github.com/go-logr/logr"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/event"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+)
+
+// Log is the logger for predicates.
+var Log logr.Logger = log.Log
+
+// EvalGeneric returns true if all predicates match for the given object.
+func EvalGeneric(obj client.Object, predicates ...predicate.Predicate) bool {
+ e := event.GenericEvent{Object: obj}
+ for _, p := range predicates {
+ if !p.Generic(e) {
+ return false
+ }
+ }
+
+ return true
+}
+
+type shootNotFailedMapper struct {
+ log logr.Logger
+ extensionsinject.WithClient
+ extensionsinject.WithContext
+ extensionsinject.WithCache
+}
+
+func (s *shootNotFailedMapper) Map(e event.GenericEvent) bool {
+ // Wait for cache sync because of backing client cache.
+ if !s.Cache.WaitForCacheSync(s.Context) {
+ err := errors.New("failed to wait for caches to sync")
+ s.log.Error(err, "Could not wait for Cache to sync", "predicate", "ShootNotFailed")
+ return false
+ }
+
+ cluster, err := extensionscontroller.GetCluster(s.Context, s.Client, e.Object.GetNamespace())
+ if err != nil {
+ s.log.Error(err, "Could not retrieve corresponding cluster")
+ return false
+ }
+
+ if extensionscontroller.IsFailed(cluster) {
+ return cluster.Shoot.Generation != cluster.Shoot.Status.ObservedGeneration
+ }
+
+ return true
+}
+
+// ShootNotFailed is a predicate that matches only if the corresponding shoot is not marked as failed.
+func ShootNotFailed() predicate.Predicate {
+ return FromMapper(&shootNotFailedMapper{log: Log.WithName("shoot-not-failed")},
+ CreateTrigger, UpdateNewTrigger, DeleteTrigger, GenericTrigger)
+}
+
+// HasType filters the incoming extension resources for those whose extension type
+// equals the given type.
+func HasType(typeName string) predicate.Predicate {
+ return FromMapper(MapperFunc(func(e event.GenericEvent) bool {
+ acc, err := extensions.Accessor(e.Object)
+ if err != nil {
+ return false
+ }
+
+ return acc.GetExtensionSpec().GetExtensionType() == typeName
+ }), CreateTrigger, UpdateNewTrigger, DeleteTrigger, GenericTrigger)
+}
+
+// HasName returns a predicate that matches the given name of a resource.
+func HasName(name string) predicate.Predicate {
+ return FromMapper(MapperFunc(func(e event.GenericEvent) bool {
+ return e.Object.GetName() == name
+ }), CreateTrigger, UpdateNewTrigger, DeleteTrigger, GenericTrigger)
+}
+
+// HasOperationAnnotation is a predicate for the operation annotation.
+func HasOperationAnnotation() predicate.Predicate {
+ return FromMapper(MapperFunc(func(e event.GenericEvent) bool {
+ return e.Object.GetAnnotations()[v1beta1constants.GardenerOperation] == v1beta1constants.GardenerOperationReconcile ||
+ e.Object.GetAnnotations()[v1beta1constants.GardenerOperation] == v1beta1constants.GardenerOperationRestore ||
+ e.Object.GetAnnotations()[v1beta1constants.GardenerOperation] == v1beta1constants.GardenerOperationMigrate
+ }), CreateTrigger, UpdateNewTrigger, GenericTrigger)
+}
+
+// LastOperationNotSuccessful is a predicate for unsuccessful last operations; it matches creation events only.
+func LastOperationNotSuccessful() predicate.Predicate {
+ operationNotSucceeded := func(obj runtime.Object) bool {
+ acc, err := extensions.Accessor(obj)
+ if err != nil {
+ return false
+ }
+
+ lastOp := acc.GetExtensionStatus().GetLastOperation()
+ return lastOp == nil ||
+ lastOp.State != gardencorev1beta1.LastOperationStateSucceeded
+ }
+
+ return predicate.Funcs{
+ CreateFunc: func(event event.CreateEvent) bool {
+ return operationNotSucceeded(event.Object)
+ },
+ UpdateFunc: func(event event.UpdateEvent) bool {
+ return false
+ },
+ GenericFunc: func(event event.GenericEvent) bool {
+ return false
+ },
+ DeleteFunc: func(event event.DeleteEvent) bool {
+ return false
+ },
+ }
+}
+
+// IsDeleting is a predicate for objects having a deletion timestamp.
+func IsDeleting() predicate.Predicate {
+ return FromMapper(MapperFunc(func(e event.GenericEvent) bool {
+ return e.Object.GetDeletionTimestamp() != nil
+ }), CreateTrigger, UpdateNewTrigger, GenericTrigger)
+}
+
+// AddTypePredicate returns a new slice which contains a type predicate and the given `predicates`.
+// If more than one extension type is given, the type predicates are OR-combined.
+func AddTypePredicate(predicates []predicate.Predicate, extensionTypes ...string) []predicate.Predicate {
+ resultPredicates := make([]predicate.Predicate, 0, len(predicates)+1)
+ resultPredicates = append(resultPredicates, predicates...)
+
+ if len(extensionTypes) == 1 {
+ resultPredicates = append(resultPredicates, HasType(extensionTypes[0]))
+ return resultPredicates
+ }
+
+ orPreds := make([]predicate.Predicate, 0, len(extensionTypes))
+ for _, extensionType := range extensionTypes {
+ orPreds = append(orPreds, HasType(extensionType))
+ }
+
+ return append(resultPredicates, predicate.Or(orPreds...))
+}
+
+// HasPurpose filters the incoming ControlPlanes for the given spec.purpose.
+func HasPurpose(purpose extensionsv1alpha1.Purpose) predicate.Predicate {
+ return FromMapper(MapperFunc(func(e event.GenericEvent) bool {
+ controlPlane, ok := e.Object.(*extensionsv1alpha1.ControlPlane)
+ if !ok {
+ return false
+ }
+
+ // Needed because a ControlPlane with purpose "normal" may have spec.purpose unset.
+ if controlPlane.Spec.Purpose == nil && purpose == extensionsv1alpha1.Normal {
+ return true
+ }
+
+ if controlPlane.Spec.Purpose == nil {
+ return false
+ }
+
+ return *controlPlane.Spec.Purpose == purpose
+ }), CreateTrigger, UpdateNewTrigger, DeleteTrigger, GenericTrigger)
+}
+
+// ClusterShootProviderType is a predicate for the provider type of the shoot in the cluster resource.
+func ClusterShootProviderType(decoder runtime.Decoder, providerType string) predicate.Predicate {
+ f := func(obj runtime.Object) bool {
+ if obj == nil {
+ return false
+ }
+
+ cluster, ok := obj.(*extensionsv1alpha1.Cluster)
+ if !ok {
+ return false
+ }
+
+ shoot, err := extensionscontroller.ShootFromCluster(decoder, cluster)
+ if err != nil {
+ return false
+ }
+
+ return shoot.Spec.Provider.Type == providerType
+ }
+
+ return predicate.Funcs{
+ CreateFunc: func(event event.CreateEvent) bool {
+ return f(event.Object)
+ },
+ UpdateFunc: func(event event.UpdateEvent) bool {
+ return f(event.ObjectNew)
+ },
+ GenericFunc: func(event event.GenericEvent) bool {
+ return f(event.Object)
+ },
+ DeleteFunc: func(event event.DeleteEvent) bool {
+ return f(event.Object)
+ },
+ }
+}
+
+// GardenCoreProviderType is a predicate for the provider type of a `gardencore.Object` implementation.
+func GardenCoreProviderType(providerType string) predicate.Predicate {
+ f := func(obj runtime.Object) bool {
+ if obj == nil {
+ return false
+ }
+
+ accessor, err := gardencore.Accessor(obj)
+ if err != nil {
+ return false
+ }
+
+ return accessor.GetProviderType() == providerType
+ }
+
+ return predicate.Funcs{
+ CreateFunc: func(event event.CreateEvent) bool {
+ return f(event.Object)
+ },
+ UpdateFunc: func(event event.UpdateEvent) bool {
+ return f(event.ObjectNew)
+ },
+ GenericFunc: func(event event.GenericEvent) bool {
+ return f(event.Object)
+ },
+ DeleteFunc: func(event event.DeleteEvent) bool {
+ return f(event.Object)
+ },
+ }
+}
+
+// ClusterShootKubernetesVersionAtLeast is a predicate for the kubernetes version of the shoot in the cluster resource.
+func ClusterShootKubernetesVersionAtLeast(decoder runtime.Decoder, kubernetesVersion string) predicate.Predicate {
+ f := func(obj runtime.Object) bool {
+ if obj == nil {
+ return false
+ }
+
+ cluster, ok := obj.(*extensionsv1alpha1.Cluster)
+ if !ok {
+ return false
+ }
+
+ shoot, err := extensionscontroller.ShootFromCluster(decoder, cluster)
+ if err != nil {
+ return false
+ }
+
+ constraint, err := version.CompareVersions(shoot.Spec.Kubernetes.Version, ">=", kubernetesVersion)
+ if err != nil {
+ return false
+ }
+
+ return constraint
+ }
+
+ return predicate.Funcs{
+ CreateFunc: func(event event.CreateEvent) bool {
+ return f(event.Object)
+ },
+ UpdateFunc: func(event event.UpdateEvent) bool {
+ return f(event.ObjectNew)
+ },
+ GenericFunc: func(event event.GenericEvent) bool {
+ return f(event.Object)
+ },
+ DeleteFunc: func(event event.DeleteEvent) bool {
+ return f(event.Object)
+ },
+ }
+}
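
For illustration, a minimal sketch (not part of the vendored patch) of how the predicates above could be evaluated against a single object via EvalGeneric; the extension type and resource name used here are hypothetical.

// predicate_usage_sketch.go: illustrative only, not part of the vendored code.
package example

import (
	extensionspredicate "github.com/gardener/gardener/extensions/pkg/predicate"
	extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
)

// shouldReconcile reports whether the given Extension passes the type and name
// predicates; EvalGeneric wraps the object in a GenericEvent and requires all
// predicates to match.
func shouldReconcile(ext *extensionsv1alpha1.Extension) bool {
	return extensionspredicate.EvalGeneric(ext,
		extensionspredicate.HasType("my-extension-type"), // hypothetical type
		extensionspredicate.HasName("my-extension"),      // hypothetical name
	)
}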
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/util/clientset.go b/vendor/github.com/gardener/gardener/extensions/pkg/util/clientset.go
new file mode 100644
index 0000000..a3612e5
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/util/clientset.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+ componentbaseconfig "k8s.io/component-base/config"
+)
+
+// NewRESTConfigFromKubeconfig creates a new REST config from a given Kubeconfig and returns it.
+func NewRESTConfigFromKubeconfig(kubeconfig []byte) (*rest.Config, error) {
+ configObj, err := clientcmd.Load(kubeconfig)
+ if err != nil {
+ return nil, err
+ }
+ clientConfig := clientcmd.NewDefaultClientConfig(*configObj, &clientcmd.ConfigOverrides{})
+
+ return createRESTConfig(clientConfig, nil)
+}
+
+// ApplyClientConnectionConfigurationToRESTConfig applies the given client connection configurations to the given
+// REST config.
+func ApplyClientConnectionConfigurationToRESTConfig(clientConnection *componentbaseconfig.ClientConnectionConfiguration, rest *rest.Config) {
+ if clientConnection == nil {
+ return
+ }
+
+ rest.AcceptContentTypes = clientConnection.AcceptContentTypes
+ rest.ContentType = clientConnection.ContentType
+ rest.Burst = int(clientConnection.Burst)
+ rest.QPS = clientConnection.QPS
+}
+
+// createRESTConfig creates a Config object for a rest client. If a clientConnection configuration object is passed
+// as well then the specified fields will be taken over as well.
+func createRESTConfig(clientConfig clientcmd.ClientConfig, clientConnection *componentbaseconfig.ClientConnectionConfiguration) (*rest.Config, error) {
+ config, err := clientConfig.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ if clientConnection != nil {
+ config.Burst = int(clientConnection.Burst)
+ config.QPS = clientConnection.QPS
+ config.AcceptContentTypes = clientConnection.AcceptContentTypes
+ config.ContentType = clientConnection.ContentType
+ }
+
+ return config, nil
+}
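
A short sketch (not part of the vendored patch) of combining the two exported helpers above, assuming `kubeconfigBytes` holds a valid kubeconfig; the QPS/Burst values are arbitrary examples.

// rest_config_sketch.go: illustrative only, not part of the vendored code.
package example

import (
	"github.com/gardener/gardener/extensions/pkg/util"

	"k8s.io/client-go/rest"
	componentbaseconfig "k8s.io/component-base/config"
)

// restConfigFor builds a *rest.Config from raw kubeconfig bytes and applies
// custom client connection settings on top of it.
func restConfigFor(kubeconfigBytes []byte) (*rest.Config, error) {
	cfg, err := util.NewRESTConfigFromKubeconfig(kubeconfigBytes)
	if err != nil {
		return nil, err
	}

	util.ApplyClientConnectionConfigurationToRESTConfig(&componentbaseconfig.ClientConnectionConfiguration{
		QPS:   100, // arbitrary example values
		Burst: 130,
	}, cfg)

	return cfg, nil
}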
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/util/serialization.go b/vendor/github.com/gardener/gardener/extensions/pkg/util/serialization.go
new file mode 100644
index 0000000..12600a7
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/util/serialization.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "fmt"
+ "reflect"
+
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// Decode takes a `decoder` and decodes the provided `data` into the provided object.
+// The underlying `into` address is used to assign the decoded object.
+func Decode(decoder runtime.Decoder, data []byte, into runtime.Object) error {
+ // Because no `into` object is passed to the decoder, the serialized `data` must be
+ // configured with proper `apiVersion` and `kind` fields. This also makes sure that the
+ // conversion logic to the internal version is called.
+ output, _, err := decoder.Decode(data, nil, nil)
+ if err != nil {
+ return err
+ }
+
+ intoType := reflect.TypeOf(into)
+
+ if reflect.TypeOf(output) == intoType {
+ reflect.ValueOf(into).Elem().Set(reflect.ValueOf(output).Elem())
+ return nil
+ }
+
+ return fmt.Errorf("decoded object of type %s is not of expected type %s", reflect.TypeOf(output), intoType)
+}
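
A minimal sketch (not part of the vendored patch) of calling Decode with a deserializer built from a runtime scheme; the Shoot type is only an illustrative choice, and the raw bytes must declare apiVersion/kind as the comment above explains.

// decode_sketch.go: illustrative only, not part of the vendored code.
package example

import (
	"github.com/gardener/gardener/extensions/pkg/util"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// decodeShoot decodes raw YAML/JSON bytes (which must carry apiVersion and
// kind) into a v1beta1 Shoot object.
func decodeShoot(data []byte) (*gardencorev1beta1.Shoot, error) {
	scheme := runtime.NewScheme()
	if err := gardencorev1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}

	decoder := serializer.NewCodecFactory(scheme).UniversalDeserializer()

	shoot := &gardencorev1beta1.Shoot{}
	if err := util.Decode(decoder, data, shoot); err != nil {
		return nil, err
	}
	return shoot, nil
}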
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot.go b/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot.go
new file mode 100644
index 0000000..3f55bcb
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot.go
@@ -0,0 +1,128 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "context"
+ "fmt"
+
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ "github.com/gardener/gardener/pkg/utils"
+ "github.com/gardener/gardener/pkg/utils/secrets"
+
+ "github.com/Masterminds/semver"
+ "github.com/pkg/errors"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/version"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+)
+
+// CAChecksumAnnotation is a resource annotation used to store the checksum of a certificate authority.
+const CAChecksumAnnotation = "checksum/ca"
+
+// GetOrCreateShootKubeconfig gets or creates a Kubeconfig for a Shoot cluster which has a running control plane in the given `namespace`.
+// If the CA of an existing Kubeconfig has changed, it creates a new Kubeconfig.
+// Newly generated Kubeconfigs are applied with the given `client` to the given `namespace`.
+func GetOrCreateShootKubeconfig(ctx context.Context, c client.Client, certificateConfig secrets.CertificateSecretConfig, namespace string) (*corev1.Secret, error) {
+ caSecret, ca, err := secrets.LoadCAFromSecret(ctx, c, namespace, v1beta1constants.SecretNameCACluster)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching CA secret %s/%s: %v", namespace, v1beta1constants.SecretNameCACluster, err)
+ }
+
+ var (
+ secret = corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: make(map[string]string),
+ Name: certificateConfig.Name,
+ Namespace: namespace,
+ },
+ }
+ key = types.NamespacedName{
+ Name: certificateConfig.Name,
+ Namespace: namespace,
+ }
+ )
+ if err := c.Get(ctx, key, &secret); client.IgnoreNotFound(err) != nil {
+ return nil, fmt.Errorf("error preparing kubeconfig: %v", err)
+ }
+
+ var (
+ computedChecksum = utils.ComputeChecksum(caSecret.Data)
+ storedChecksum, ok = secret.Annotations[CAChecksumAnnotation]
+ )
+ if ok && computedChecksum == storedChecksum {
+ return &secret, nil
+ }
+
+ certificateConfig.SigningCA = ca
+ certificateConfig.CertType = secrets.ClientCert
+
+ config := secrets.ControlPlaneSecretConfig{
+ CertificateSecretConfig: &certificateConfig,
+
+ KubeConfigRequest: &secrets.KubeConfigRequest{
+ ClusterName: namespace,
+ APIServerURL: kubeAPIServerServiceDNS(namespace),
+ },
+ }
+
+ controlPlane, err := config.GenerateControlPlane()
+ if err != nil {
+ return nil, fmt.Errorf("error creating kubeconfig: %v", err)
+ }
+
+ _, err = controllerutil.CreateOrUpdate(ctx, c, &secret, func() error {
+ secret.Data = controlPlane.SecretData()
+ if secret.Annotations == nil {
+ secret.Annotations = make(map[string]string)
+ }
+ secret.Annotations[CAChecksumAnnotation] = computedChecksum
+ return nil
+ })
+
+ return &secret, err
+}
+
+// kubeAPIServerServiceDNS returns a domain name which can be used to contact
+// the Kube-Apiserver deployment of a Shoot within the Seed cluster.
+// e.g. kube-apiserver.shoot--project--prod.svc.cluster.local.
+func kubeAPIServerServiceDNS(namespace string) string {
+ return fmt.Sprintf("%s.%s", v1beta1constants.DeploymentNameKubeAPIServer, namespace)
+}
+
+// VersionMajorMinor extracts and returns the major and the minor part of the given version (input must be a semantic version).
+func VersionMajorMinor(version string) (string, error) {
+ v, err := semver.NewVersion(version)
+ if err != nil {
+ return "", errors.Wrapf(err, "Invalid version string '%s'", version)
+ }
+ return fmt.Sprintf("%d.%d", v.Major(), v.Minor()), nil
+}
+
+// VersionInfo converts the given version string to version.Info (input must be a semantic version).
+func VersionInfo(vs string) (*version.Info, error) {
+ v, err := semver.NewVersion(vs)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Invalid version string '%s'", vs)
+ }
+ return &version.Info{
+ Major: fmt.Sprintf("%d", v.Major()),
+ Minor: fmt.Sprintf("%d", v.Minor()),
+ GitVersion: fmt.Sprintf("v%d.%d.%d", v.Major(), v.Minor(), v.Patch()),
+ }, nil
+}
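
A small usage sketch (not part of the vendored patch) of the semver helpers above; the version string is arbitrary.

// version_sketch.go: illustrative only, not part of the vendored code.
package example

import (
	"fmt"

	"github.com/gardener/gardener/extensions/pkg/util"
)

// printVersionParts shows the different representations produced by the helpers.
func printVersionParts() error {
	majorMinor, err := util.VersionMajorMinor("1.20.3")
	if err != nil {
		return err
	}

	info, err := util.VersionInfo("1.20.3")
	if err != nil {
		return err
	}

	fmt.Println(majorMinor)      // "1.20"
	fmt.Println(info.GitVersion) // "v1.20.3"
	return nil
}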
diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot_clients.go b/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot_clients.go
new file mode 100644
index 0000000..9d06eb7
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot_clients.go
@@ -0,0 +1,130 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "context"
+
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ "github.com/gardener/gardener/pkg/chartrenderer"
+ gardenerkubernetes "github.com/gardener/gardener/pkg/client/kubernetes"
+ kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
+ "github.com/gardener/gardener/pkg/utils/secrets"
+
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/version"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ShootClients bundles together several clients for the shoot cluster.
+type ShootClients interface {
+ Client() client.Client
+ Clientset() kubernetes.Interface
+ GardenerClientset() gardenerkubernetes.Interface
+ ChartApplier() gardenerkubernetes.ChartApplier
+ Version() *version.Info
+}
+
+type shootClients struct {
+ c client.Client
+ clientset kubernetes.Interface
+ gardenerClientset gardenerkubernetes.Interface
+ chartApplier gardenerkubernetes.ChartApplier
+ version *version.Info
+}
+
+func (s *shootClients) Client() client.Client { return s.c }
+func (s *shootClients) Clientset() kubernetes.Interface { return s.clientset }
+func (s *shootClients) GardenerClientset() gardenerkubernetes.Interface { return s.gardenerClientset }
+func (s *shootClients) ChartApplier() gardenerkubernetes.ChartApplier { return s.chartApplier }
+func (s *shootClients) Version() *version.Info { return s.version }
+
+// NewShootClients creates a new shoot client interface based on the given clients.
+func NewShootClients(c client.Client, clientset kubernetes.Interface, gardenerClientset gardenerkubernetes.Interface, chartApplier gardenerkubernetes.ChartApplier, version *version.Info) ShootClients {
+ return &shootClients{
+ c: c,
+ clientset: clientset,
+ gardenerClientset: gardenerClientset,
+ chartApplier: chartApplier,
+ version: version,
+ }
+}
+
+// NewClientForShoot returns the rest config and the client for the given shoot namespace.
+func NewClientForShoot(ctx context.Context, c client.Client, namespace string, opts client.Options) (*rest.Config, client.Client, error) {
+ var (
+ gardenerSecret = &corev1.Secret{}
+ err error
+ )
+
+ if err = c.Get(ctx, kutil.Key(namespace, v1beta1constants.SecretNameGardenerInternal), gardenerSecret); err != nil && apierrors.IsNotFound(err) {
+ err = c.Get(ctx, kutil.Key(namespace, v1beta1constants.SecretNameGardener), gardenerSecret)
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+
+ shootRESTConfig, err := NewRESTConfigFromKubeconfig(gardenerSecret.Data[secrets.DataKeyKubeconfig])
+ if err != nil {
+ return nil, nil, err
+ }
+ shootClient, err := client.New(shootRESTConfig, opts)
+ if err != nil {
+ return nil, nil, err
+ }
+ return shootRESTConfig, shootClient, nil
+}
+
+// NewClientsForShoot is a utility function that creates a new clientset and a chart applier for the shoot cluster.
+// It uses the 'gardener' secret in the given shoot namespace. It also returns the Kubernetes version of the cluster.
+func NewClientsForShoot(ctx context.Context, c client.Client, namespace string, opts client.Options) (ShootClients, error) {
+ shootRESTConfig, shootClient, err := NewClientForShoot(ctx, c, namespace, opts)
+ if err != nil {
+ return nil, err
+ }
+ shootClientset, err := kubernetes.NewForConfig(shootRESTConfig)
+ if err != nil {
+ return nil, err
+ }
+ shootGardenerClientset, err := gardenerkubernetes.NewWithConfig(gardenerkubernetes.WithRESTConfig(shootRESTConfig), gardenerkubernetes.WithClientOptions(opts))
+ if err != nil {
+ return nil, err
+ }
+ shootVersion, err := shootClientset.Discovery().ServerVersion()
+ if err != nil {
+ return nil, err
+ }
+ shootChartApplier := shootGardenerClientset.ChartApplier()
+
+ return &shootClients{
+ c: shootClient,
+ clientset: shootClientset,
+ gardenerClientset: shootGardenerClientset,
+ chartApplier: shootChartApplier,
+ version: shootVersion,
+ }, nil
+}
+
+// NewChartRendererForShoot creates a new chartrenderer.Interface for the shoot cluster.
+func NewChartRendererForShoot(version string) (chartrenderer.Interface, error) {
+ v, err := VersionInfo(version)
+ if err != nil {
+ return nil, err
+ }
+ return chartrenderer.NewWithServerVersion(v), nil
+}
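
A sketch (not part of the vendored patch) of wiring the shoot client helpers together starting from a seed client; the namespace parameter and the minimal error handling are assumptions for illustration.

// shoot_clients_sketch.go: illustrative only, not part of the vendored code.
package example

import (
	"context"

	"github.com/gardener/gardener/extensions/pkg/util"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// shootServerVersion creates the bundled shoot clients from the seed client
// and returns the discovered Kubernetes server version of the shoot cluster.
func shootServerVersion(ctx context.Context, seedClient client.Client, shootNamespace string) (string, error) {
	shootClients, err := util.NewClientsForShoot(ctx, seedClient, shootNamespace, client.Options{})
	if err != nil {
		return "", err
	}
	return shootClients.Version().GitVersion, nil
}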
diff --git a/vendor/github.com/gardener/gardener/hack/.ci/component_descriptor b/vendor/github.com/gardener/gardener/hack/.ci/component_descriptor
new file mode 100644
index 0000000..7f62442
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/.ci/component_descriptor
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+
+set -e
+
+repo_root_dir="$1"
+repo_name="${2:-github.com/gardener/gardener}"
+descriptor_out_file="${COMPONENT_DESCRIPTOR_PATH}"
+
+echo "enriching creating component descriptor from ${BASE_DEFINITION_PATH}"
+
+if [[ -f "$repo_root_dir/charts/images.yaml" ]]; then
+ images="$(yaml2json < "$repo_root_dir/charts/images.yaml")"
+ eval "$(jq -r ".images |
+ map(select(.sourceRepository != \"$repo_name\") |
+ if (.name == \"hyperkube\" or .name == \"kube-apiserver\" or .name == \"kube-controller-manager\" or .name == \"kube-scheduler\" or .name == \"kube-proxy\" or .repository == \"k8s.gcr.io/hyperkube\") then
+ \"--generic-dependencies '{\\\"name\\\": \\\"\" + .name + \"\\\", \\\"version\\\": \\\"\" + .tag + \"\\\"}'\"
+ elif (.repository | startswith(\"eu.gcr.io/gardener-project/gardener\")) then
+ \"--component-dependencies '{\\\"name\\\": \\\"\" + .sourceRepository + \"\\\", \\\"version\\\": \\\"\" + .tag + \"\\\"}'\"
+ else
+ \"--container-image-dependencies '{\\\"name\\\": \\\"\" + .name + \"\\\", \\\"image_reference\\\": \\\"\" + .repository + \":\" + .tag + \"\\\", \\\"version\\\": \\\"\" + .tag + \"\\\"}'\"
+ end) |
+ \"${ADD_DEPENDENCIES_CMD} \\\\\n\" +
+ join(\" \\\\\n\")" <<< "$images")"
+fi
+
+if [[ -d "$repo_root_dir/charts/" ]]; then
+ for image_tpl_path in "$repo_root_dir/charts/"*"/templates/_images.tpl"; do
+ if [[ ! -f "$image_tpl_path" ]]; then
+ continue
+ fi
+
+ outputFile=$(sed 's/{{-//' $image_tpl_path | sed 's/}}//' | sed 's/define//' | sed 's/-//' | sed 's/end//' | sed 's/"//' | sed 's/"//' |sed 's/image.//' | sed -e 's/^[ \t]*//' | awk -v RS= '{for (i=1; i<=NF; i++) printf "%s%s", $i, (i==NF?"\n":" ")}')
+ echo "enriching creating component descriptor from ${image_tpl_path}"
+
+ while read p; do
+ line="$(echo -e "$p")"
+ IFS=' ' read -r -a array <<< "$line"
+ IFS=': ' read -r -a imageAndTag <<< ${array[1]}
+
+ NAME=${array[0]}
+ REPOSITORY=${imageAndTag[0]}
+ TAG=${imageAndTag[1]}
+
+ gardener="eu.gcr.io/gardener-project/gardener"
+ if [[ "$NAME" == "hyperkube" ]]; then
+ ${ADD_DEPENDENCIES_CMD} --generic-dependencies "{\"name\": \"$NAME\", \"version\": \"$TAG\"}"
+ elif [[ $REPOSITORY =~ "eu.gcr.io/gardener-project/gardener"* ]]; then
+ ${ADD_DEPENDENCIES_CMD} --generic-dependencies "{\"name\": \"$NAME\", \"version\": \"$TAG\"}"
+ else
+ ${ADD_DEPENDENCIES_CMD} --container-image-dependencies "{\"name\": \"${NAME}\", \"image_reference\": \"${REPOSITORY}:${TAG}\", \"version\": \"$TAG\"}"
+ fi
+ done < <(echo "$outputFile")
+ done
+fi
+
+cp "${BASE_DEFINITION_PATH}" "${descriptor_out_file}"
diff --git a/vendor/github.com/gardener/gardener/hack/.ci/doc.go b/vendor/github.com/gardener/gardener/hack/.ci/doc.go
new file mode 100644
index 0000000..6449330
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/.ci/doc.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This package imports CI related scripts - it is to force `go mod` to see them as dependencies.
+package ci
diff --git a/vendor/github.com/gardener/gardener/hack/.ci/prepare_release b/vendor/github.com/gardener/gardener/hack/.ci/prepare_release
new file mode 100644
index 0000000..882afd4
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/.ci/prepare_release
@@ -0,0 +1,76 @@
+#!/usr/bin/env sh
+#
+# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+repo_root_dir="$1"
+repo_base="$2"
+repo_name="$3"
+
+apk update
+apk add --no-cache \
+ ca-certificates \
+ make \
+ bash \
+ go \
+ git \
+ musl-dev \
+ curl \
+ openssl \
+ tar \
+ gzip \
+ gcc \
+ sed
+
+GOLANG_VERSION="$(sed -rn 's/FROM (eu\.gcr\.io\/gardener-project\/3rd\/golang|golang):([^ ]+).*/\2/p' < "$repo_root_dir/Dockerfile")"
+
+export \
+ GOROOT="$(go env GOROOT)" \
+ GOOS="$(go env GOOS)" \
+ GOARCH="$(go env GOARCH)" \
+ GOHOSTOS="$(go env GOHOSTOS)" \
+ GOHOSTARCH="$(go env GOHOSTARCH)"
+
+echo "Downloading go $GOLANG_VERSION"
+wget -q -O - "https://golang.org/dl/go$GOLANG_VERSION.src.tar.gz" | tar zx -C /usr/local
+cd /usr/local/go/src
+echo "Executing make on go $GOLANG_VERSION"
+./make.bash > /dev/null 2>&1
+
+export GOPATH="$(mktemp -d)"
+export GOBIN="$GOPATH/bin"
+export PATH="$GOBIN:$PATH"
+
+REPO_BASE="$GOPATH/src/$repo_base"
+mkdir -p "$REPO_BASE"
+REPO_PATH="$REPO_BASE/$repo_name"
+cp -R "$repo_root_dir" "$REPO_PATH"
+
+current_dir="$(pwd)"
+cd "$REPO_PATH"
+make install-requirements
+cd "$current_dir"
+
+echo "$EFFECTIVE_VERSION" > "$REPO_PATH/VERSION"
+cur_dir="$(pwd)"
+cd "$REPO_PATH"
+if ! make generate; then
+ cd "$cur_dir"
+ exit 1
+fi
+cd "$cur_dir"
+cp -RT "$REPO_PATH/" "$repo_root_dir/"
+
diff --git a/vendor/github.com/gardener/gardener/hack/.ci/set_dependency_version b/vendor/github.com/gardener/gardener/hack/.ci/set_dependency_version
new file mode 100644
index 0000000..0e149c7
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/.ci/set_dependency_version
@@ -0,0 +1,125 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pathlib
+import yaml
+
+import util
+import product.model
+
+dependency_type = util.check_env('DEPENDENCY_TYPE')
+if not dependency_type == 'component':
+ util.fail('don\'t know how to upgrade dependency type: ' + str(dependency_type))
+
+component_reference = product.model.ComponentReference.create(
+ name=util.check_env('DEPENDENCY_NAME'),
+ version=util.check_env('DEPENDENCY_VERSION'),
+)
+
+images_file = pathlib.Path(
+ util.check_env('REPO_DIR'),
+ 'charts',
+ 'images.yaml',
+)
+
+class ImagesParser(object):
+ '''
+ a naive YAML-parser crafted for the special case of processing
+ gardener's images.yaml file; crafted that way to preserve comments/empty lines
+ '''
+ def __init__(
+ self,
+ images_file,
+ names,
+ target_version,
+ ):
+ self.images_file = images_file
+ self.lines = images_file.read_text().split('\n')
+ self.names = names
+ self.target_version = target_version
+ self._line_idx = 0
+
+ def _line(self):
+ return self.lines[self._line_idx]
+
+ def _next_line(self):
+ self._line_idx += 1
+ return self._line()
+
+ def _skip_to_next_entry(self, names):
+ while not self._line().startswith('-'):
+ self._next_line()
+ name = self._line().strip().split(':')[-1].strip()
+
+ if name not in names:
+ self._next_line()
+ return self._skip_to_next_entry(names)
+
+ # found one of the entries:
+ return name
+
+ def _skip_to_next_tag(self):
+ self._next_line()
+ while not self._line().startswith('-'):
+ if self._line().strip().startswith('tag:'):
+ return
+ self._next_line()
+ raise RuntimeError('did not find tag attribute')
+
+ def set_versions(self):
+ while self.names:
+ try:
+ name = self._skip_to_next_entry(self.names)
+ except IndexError:
+ print(str(self.names))
+ util.fail('don\'t know how to update ' + str(self.names))
+ self.names.remove(name)
+ self._skip_to_next_tag()
+ tag_line = self._line()
+ indent = len(tag_line) - len(tag_line.lstrip())
+ patched_line = ' ' * indent + 'tag: "{version}"'.format(version=self.target_version)
+ self.lines[self._line_idx] = patched_line
+
+ def write_updated_file(self):
+ self.images_file.write_text(
+ '\n'.join(self.lines)
+ )
+
+
+# handle special cases
+name = component_reference.github_repo()
+if name == 'autoscaler':
+ names = ['cluster-autoscaler']
+elif name == 'vpn':
+ names = ['vpn-seed', 'vpn-shoot']
+elif name == 'external-dns-management':
+ names = ['dns-controller-manager']
+elif name == 'logging':
+ names = ['fluent-bit-plugin-installer']
+elif name == 'etcd-custom-image':
+ names = ['etcd']
+else:
+ names = [name]
+
+
+parser = ImagesParser(
+ images_file=images_file,
+ names=names,
+ target_version=str(component_reference.version()),
+)
+
+parser.set_versions()
+parser.write_updated_file()
diff --git a/vendor/github.com/gardener/gardener/hack/LICENSE_BOILERPLATE.txt b/vendor/github.com/gardener/gardener/hack/LICENSE_BOILERPLATE.txt
new file mode 100644
index 0000000..e12758c
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/LICENSE_BOILERPLATE.txt
@@ -0,0 +1,15 @@
+/*
+Copyright (c) YEAR SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
diff --git a/vendor/github.com/gardener/gardener/hack/api-reference/template/members.tpl b/vendor/github.com/gardener/gardener/hack/api-reference/template/members.tpl
new file mode 100644
index 0000000..9f08d1a
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/api-reference/template/members.tpl
@@ -0,0 +1,48 @@
+{{ define "members" }}
+
+{{ range .Members }}
+{{ if not (hiddenMember .)}}
+
+
+ {{ fieldName . }}
+
+ {{ if linkForType .Type }}
+
+ {{ typeDisplayName .Type }}
+
+ {{ else }}
+ {{ typeDisplayName .Type }}
+ {{ end }}
+
+ |
+
+ {{ if fieldEmbedded . }}
+
+ (Members of {{ fieldName . }} are embedded into this type.)
+
+ {{ end}}
+
+ {{ if isOptionalMember .}}
+ (Optional)
+ {{ end }}
+
+ {{ safe (renderComments .CommentLines) }}
+
+ {{ if and (eq (.Type.Name.Name) "ObjectMeta") }}
+ Refer to the Kubernetes API documentation for the fields of the
+ metadata field.
+ {{ end }}
+
+ {{ if or (eq (fieldName .) "spec") }}
+
+
+
+ {{ template "members" .Type }}
+
+ {{ end }}
+ |
+
+{{ end }}
+{{ end }}
+
+{{ end }}
diff --git a/vendor/github.com/gardener/gardener/hack/api-reference/template/pkg.tpl b/vendor/github.com/gardener/gardener/hack/api-reference/template/pkg.tpl
new file mode 100644
index 0000000..c06bfe5
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/api-reference/template/pkg.tpl
@@ -0,0 +1,48 @@
+{{ define "packages" }}
+
+{{ with .packages}}
+Packages:
+
+{{ end}}
+
+{{ range .packages }}
+
+ {{- packageDisplayName . -}}
+
+
+ {{ with (index .GoPackages 0 )}}
+ {{ with .DocComments }}
+
+ {{ safe (renderComments .) }}
+
+ {{ end }}
+ {{ end }}
+
+ Resource Types:
+
+ {{- range (visibleTypes (sortedTypes .Types)) -}}
+ {{ if isExportedType . -}}
+ -
+ {{ typeDisplayName . }}
+
+ {{- end }}
+ {{- end -}}
+
+
+ {{ range (visibleTypes (sortedTypes .Types))}}
+ {{ template "type" . }}
+ {{ end }}
+
+{{ end }}
+
+
+ Generated with gen-crd-api-reference-docs
+
+
+{{ end }}
diff --git a/vendor/github.com/gardener/gardener/hack/api-reference/template/tools.go b/vendor/github.com/gardener/gardener/hack/api-reference/template/tools.go
new file mode 100644
index 0000000..87d0d26
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/api-reference/template/tools.go
@@ -0,0 +1,18 @@
+// +build tools
+
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This package imports things required by build scripts, to force `go mod` to see them as dependencies
+package template
diff --git a/vendor/github.com/gardener/gardener/hack/api-reference/template/type.tpl b/vendor/github.com/gardener/gardener/hack/api-reference/template/type.tpl
new file mode 100644
index 0000000..e28b088
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/api-reference/template/type.tpl
@@ -0,0 +1,58 @@
+{{ define "type" }}
+
+
+ {{- .Name.Name }}
+ {{ if eq .Kind "Alias" }}({{.Underlying}} alias){{ end -}}
+
+{{ with (typeReferences .) }}
+
+ (Appears on:
+ {{- $prev := "" -}}
+ {{- range . -}}
+ {{- if $prev -}}, {{ end -}}
+ {{ $prev = . }}
+ {{ typeDisplayName . }}
+ {{- end -}}
+ )
+
+{{ end }}
+
+
+
+ {{ safe (renderComments .CommentLines) }}
+
+
+{{ if .Members }}
+
+
+
+ | Field |
+ Description |
+
+
+
+ {{ if isExportedType . }}
+
+
+ apiVersion
+ string |
+
+
+ {{apiGroup .}}
+
+ |
+
+
+
+ kind
+ string
+ |
+ {{.Name.Name}} |
+
+ {{ end }}
+ {{ template "members" .}}
+
+
+{{ end }}
+
+{{ end }}
diff --git a/vendor/github.com/gardener/gardener/hack/check-charts.sh b/vendor/github.com/gardener/gardener/hack/check-charts.sh
new file mode 100755
index 0000000..46e1c8c
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/check-charts.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+echo "> Check Helm charts"
+
+if [[ -d "$1" ]]; then
+ echo "Checking for chart symlink errors"
+ BROKEN_SYMLINKS=$(find -L $1 -type l)
+ if [[ "$BROKEN_SYMLINKS" ]]; then
+ echo "Found broken symlinks:"
+ echo "$BROKEN_SYMLINKS"
+ exit 1
+ fi
+ echo "Checking whether all charts can be rendered"
+ for chart_dir in $(find charts -type d -exec test -f '{}'/Chart.yaml \; -print -prune | sort); do
+ [ -f "$chart_dir/values-test.yaml" ] && values_files="-f $chart_dir/values-test.yaml" || unset values_files
+ helm template $values_files "$chart_dir" 1> /dev/null
+ done
+fi
+
+echo "All checks successful"
diff --git a/vendor/github.com/gardener/gardener/hack/check-generate.sh b/vendor/github.com/gardener/gardener/hack/check-generate.sh
new file mode 100755
index 0000000..1bb0da3
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/check-generate.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+echo "> Generate / Vendor Check"
+
+makefile="$1/Makefile"
+check_branch="__check"
+initialized_git=false
+stashed=false
+checked_out=false
+generated=false
+vendored=false
+
+function delete-check-branch {
+ git rev-parse --verify "$check_branch" &>/dev/null && git branch -q -D "$check_branch" || :
+}
+
+function cleanup {
+ if [[ "$generated" == true ]] || [[ "$vendored" == true ]]; then
+ if ! clean_err="$(make -f "$makefile" clean && git reset --hard -q && git clean -qdf)"; then
+ echo "Could not clean: $clean_err"
+ fi
+ fi
+
+ if [[ "$checked_out" == true ]]; then
+ if ! checkout_err="$(git checkout -q -)"; then
+ echo "Could not checkout to previous branch: $checkout_err"
+ fi
+ fi
+
+ if [[ "$stashed" == true ]]; then
+ if ! stash_err="$(git stash pop -q)"; then
+ echo "Could not pop stash: $stash_err"
+ fi
+ fi
+
+ if [[ "$initialized_git" == true ]]; then
+ if ! rm_err="$(rm -rf .git)"; then
+ echo "Could not delete git directory: $rm_err"
+ fi
+ fi
+
+ delete-check-branch
+}
+
+trap cleanup EXIT SIGINT SIGTERM
+
+if which git &>/dev/null; then
+ if ! git rev-parse --git-dir &>/dev/null; then
+ initialized_git=true
+ git init -q
+ git add --all
+ git config --global user.name 'Gardener'
+ git config --global user.email 'gardener@cloud'
+ git commit -q --allow-empty -m 'initial commit'
+ fi
+
+ if [[ "$(git rev-parse --abbrev-ref HEAD)" == "$check_branch" ]]; then
+ echo "Already on check branch, aborting"
+ exit 1
+ fi
+ delete-check-branch
+
+ if [[ "$(git status -s)" != "" ]]; then
+ stashed=true
+ git stash --include-untracked -q
+ git stash apply -q &>/dev/null
+ fi
+
+ checked_out=true
+ git checkout -q -b "$check_branch"
+ git add --all
+ git commit -q --allow-empty -m 'checkpoint'
+
+ old_status="$(git status -s)"
+ if ! out=$(make -f "$makefile" clean 2>&1); then
+ echo "Error during calling make clean: $out"
+ exit 1
+ fi
+
+ echo ">> make generate"
+ generated=true
+ if ! out=$(make -f "$makefile" generate 2>&1); then
+ echo "Error during calling make generate: $out"
+ exit 1
+ fi
+ new_status="$(git status -s)"
+
+ if [[ "$old_status" != "$new_status" ]]; then
+ echo "make generate needs to be run:"
+ echo "$new_status"
+ exit 1
+ fi
+
+ echo ">> make revendor"
+ vendored=true
+ if ! out=$(make -f "$makefile" revendor 2>&1); then
+ echo "Error during calling make revendor: $out"
+ exit 1
+ fi
+ new_status="$(git status -s)"
+
+ if [[ "$old_status" != "$new_status" ]]; then
+ echo "make revendor needs to be run:"
+ echo "$new_status"
+ exit 1
+ fi
+else
+ echo "No git detected, cannot run vendor check"
+fi
+exit 0
diff --git a/vendor/github.com/gardener/gardener/hack/check.sh b/vendor/github.com/gardener/gardener/hack/check.sh
new file mode 100755
index 0000000..188a3e0
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/check.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+GOLANGCI_LINT_CONFIG_FILE=""
+
+for arg in "$@"; do
+ case $arg in
+ --golangci-lint-config=*)
+ GOLANGCI_LINT_CONFIG_FILE="-c ${arg#*=}"
+ shift
+ ;;
+ esac
+done
+
+echo "> Check"
+
+echo "Executing golangci-lint"
+golangci-lint run $GOLANGCI_LINT_CONFIG_FILE --timeout 10m $@
+
+echo "Executing go vet"
+go vet -mod=vendor $@
+
+echo "Executing gofmt/goimports"
+folders=()
+for f in $@; do
+ folders+=( "$(echo $f | sed 's/\.\/\(.*\)\/\.\.\./\1/')" )
+done
+unformatted_files="$(goimports -l ${folders[*]})"
+if [[ "$unformatted_files" ]]; then
+ echo "Unformatted files detected:"
+ echo "$unformatted_files"
+ exit 1
+fi
+
+echo "All checks successful"
diff --git a/vendor/github.com/gardener/gardener/hack/clean.sh b/vendor/github.com/gardener/gardener/hack/clean.sh
new file mode 100755
index 0000000..c2b2a06
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/clean.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+echo "> Clean"
+
+for source_tree in $@; do
+ find "$(dirname "$source_tree")" -type f -name "zz_*.go" -exec rm '{}' \;
+ grep -lr --include="*.go" "//go:generate packr2" . | xargs -I {} packr2 clean "{}/.."
+done
diff --git a/vendor/github.com/gardener/gardener/hack/format.sh b/vendor/github.com/gardener/gardener/hack/format.sh
new file mode 100755
index 0000000..de42f3d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/format.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+echo "> Format"
+
+goimports -l -w $@
diff --git a/vendor/github.com/gardener/gardener/hack/generate-controller-registration.sh b/vendor/github.com/gardener/gardener/hack/generate-controller-registration.sh
new file mode 100755
index 0000000..0c8c81f
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/generate-controller-registration.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+set -o pipefail
+
+function usage {
+ cat <<EOM
+Usage:
+generate-controller-registration [--optional] <name> <chart-dir> <version-file> <dest> <kind-and-type> [kinds-and-types ...]
+
+    <name>              Name of the controller registration to generate.
+    <chart-dir>         Location of the chart directory.
+    <version-file>      Location of the VERSION file.
+    <dest>              The destination file to write the registration YAML to.
+    <kind-and-type>     A tuple of kind and type of the controller registration to generate.
+                        Separated by ':'.
+                        Example: OperatingSystemConfig:foobar
+    [kinds-and-types]   Further tuples of kind and type of the controller registration to generate.
+                        Separated by ':'.
+EOM
+ exit 0
+}
+
+if [ "$1" == "--optional" ]; then
+ shift
+ MODE=$'\n globallyEnabled: false'
+fi
+NAME="$1"
+CHART_DIR="$2"
+VERSION_FILE="$3"
+DEST="$4"
+KIND_AND_TYPE="$5"
+
+VERSION="$(cat "$VERSION_FILE")"
+
+( [[ -z "$NAME" ]] || [[ -z "$CHART_DIR" ]] || [[ -z "$DEST" ]] || [[ -z "$KIND_AND_TYPE" ]]) && usage
+
+KINDS_AND_TYPES=("$KIND_AND_TYPE" "${@:6}")
+
+# The following code is to make `helm package` idempotent: Usually, everytime `helm package` is invoked,
+# it produces a different `.tgz` due to modification timestamps and some special shasums of gzip. We
+# resolve this by unarchiving the `.tgz`, compressing it again with a constant `mtime` and no gzip
+# checksums.
+temp_dir="$(mktemp -d)"
+temp_helm_home="$(mktemp -d)"
+temp_extract_dir="$(mktemp -d)"
+function cleanup {
+ rm -rf "$temp_dir"
+ rm -rf "$temp_helm_home"
+ rm -rf "$temp_extract_dir"
+}
+trap cleanup EXIT ERR INT TERM
+
+export HELM_HOME="$temp_helm_home"
+[ "$(helm version --client --template "{{.Version}}" | head -c2 | tail -c1)" = "3" ] || helm init --client-only > /dev/null 2>&1
+helm package "$CHART_DIR" --version "$VERSION" --app-version "$VERSION" --destination "$temp_dir" > /dev/null
+gtar -xzm -C "$temp_extract_dir" -f "$temp_dir"/*
+chart="$(gtar --sort=name -c --owner=root:0 --group=root:0 --mtime='UTC 2019-01-01' -C "$temp_extract_dir" "$(basename "$temp_extract_dir"/*)" | gzip -n | base64 | tr -d '\n')"
+
+mkdir -p "$(dirname "$DEST")"
+
+cat <<EOM > "$DEST"
+---
+apiVersion: core.gardener.cloud/v1beta1
+kind: ControllerRegistration
+metadata:
+ name: $NAME
+spec:
+ resources:
+EOM
+
+for kind_and_type in "${KINDS_AND_TYPES[@]}"; do
+ KIND="$(echo "$kind_and_type" | cut -d ':' -f 1)"
+ TYPE="$(echo "$kind_and_type" | cut -d ':' -f 2)"
+  cat <<EOM >> "$DEST"
+ - kind: $KIND
+ type: $TYPE$MODE
+EOM
+done
+
+cat <<EOM >> "$DEST"
+ deployment:
+ type: helm
+ providerConfig:
+ chart: $chart
+ values:
+ image:
+ tag: $VERSION
+EOM
+
+echo "Successfully generated controller registration at $DEST"
diff --git a/vendor/github.com/gardener/gardener/hack/generate.sh b/vendor/github.com/gardener/gardener/hack/generate.sh
new file mode 100755
index 0000000..0b3338a
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/generate.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+echo "> Generate"
+
+GO111MODULE=on go generate -mod=vendor $@
diff --git a/vendor/github.com/gardener/gardener/hack/get-build-ld-flags.sh b/vendor/github.com/gardener/gardener/hack/get-build-ld-flags.sh
new file mode 100755
index 0000000..b2037ec
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/get-build-ld-flags.sh
@@ -0,0 +1,33 @@
+#!/bin/bash -e
+#
+# Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SOURCE_REPOSITORY="${1:-github.com/gardener/gardener}"
+VERSION_PATH="${2:-$(dirname $0)/../VERSION}"
+VERSION_VERSIONFILE="$(cat "$VERSION_PATH")"
+VERSION="${EFFECTIVE_VERSION:-$VERSION_VERSIONFILE}"
+
+# .dockerignore ignores all files irrelevant for the build (e.g. docs) to only copy relevant source files to the build
+# container. Hence, git will always detect a dirty work tree when building in a container (many deleted files).
+# This command filters out all deleted files that are ignored by .dockerignore to only detect changes to relevant files
+# as a dirty work tree.
+# Additionally, it filters out changes to the `VERSION` file, as this is currently the only way to inject the
+# version-to-build in our pipelines (see https://github.com/gardener/cc-utils/issues/431).
+TREE_STATE="$([ -z "$(git status --porcelain 2>/dev/null | grep -vf <(git ls-files --deleted --ignored --exclude-from=.dockerignore) -e 'VERSION')" ] && echo clean || echo dirty)"
+
+echo "-X $SOURCE_REPOSITORY/pkg/version.gitVersion=$VERSION
+ -X $SOURCE_REPOSITORY/pkg/version.gitTreeState=$TREE_STATE
+ -X $SOURCE_REPOSITORY/pkg/version.gitCommit=$(git rev-parse --verify HEAD)
+ -X $SOURCE_REPOSITORY/pkg/version.buildDate=$(date '+%Y-%m-%dT%H:%M:%S%z' | sed 's/\([0-9][0-9]\)$/:\1/g')"
diff --git a/vendor/github.com/gardener/gardener/hack/hook-me.sh b/vendor/github.com/gardener/gardener/hack/hook-me.sh
new file mode 100755
index 0000000..0ec6458
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/hook-me.sh
@@ -0,0 +1,306 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+checkPrereqs() {
+ command -v host > /dev/null || echo "please install host command for lookup"
+ command -v inlets > /dev/null || echo "please install the inlets command. For mac, simply use \`brew install inlets\`, for linux \`curl -sLS https://get.inlets.dev | sudo sh\`"
+}
+
+createOrUpdateWebhookSVC(){
+namespace=${1:-}
+[[ -z $namespace ]] && echo "Please specify extension namespace!" && exit 1
+
+providerName=${2:-}
+[[ -z $providerName ]] && echo "Please specify the provider name (aws,gcp,azure,..etc.)!" && exit 1
+
+tmpService=$(mktemp)
+kubectl get svc gardener-extension-provider-$providerName -o yaml > $tmpService
+
+ cat <&1 > /dev/null
+ do
+ sleep 2s
+ done
+ echo $(kubectl -n $namespace get svc inlets-lb -o go-template="{{ index (index .status.loadBalancer.ingress 0).hostname }}")
+ ;;
+ *)
+ until host $(kubectl -n $namespace get svc inlets-lb -o go-template="{{ index (index .status.loadBalancer.ingress 0).ip }}") 2>&1 > /dev/null
+ do
+ sleep 2s
+ done
+ echo $(kubectl -n $namespace get svc inlets-lb -o go-template="{{ index (index .status.loadBalancer.ingress 0).ip }}") ;;
+ esac
+}
+
+createServerPod(){
+namespace=${1:-}
+[[ -z $namespace ]] && echo "Please specify extension namespace!" && exit 1
+
+providerName=${2:-}
+[[ -z $providerName ]] && echo "Please specify the provider name (aws,gcp,azure,..etc.)!" && exit 1
+
+cat </dev/null
+ exit 0
+}
+
+usage(){
+ echo "==================================================================DISCLAIMER============================================================================"
+ echo "This scripts needs to be run against the KUBECONFIG of a seed cluster, please set your KUBECONFIG accordingly"
+ echo "You also need to set the \`ignoreResources\` variable in your extension chart to \`true\`, generate and apply the corresponding controller-installation"
+ echo "========================================================================================================================================================"
+
+ echo ""
+
+ echo "===================================PRE-REQs========================================="
+ echo "\`host\` commands for DNS"
+ echo "\`inlets\` command. For mac, simply use \`brew install inlets\`, for linux \`curl -sLS https://get.inlets.dev | sudo sh\`"
+ echo "===================================================================================="
+
+ echo ""
+
+ echo "========================================================USAGE======================================================================"
+ echo "> ./hack/hook-me.sh "
+ echo "> \`make EXTENSION_NAMESPACE= WEBHOOK_CONFIG_MODE=service start\`"
+ echo "=================================================================================================================================="
+
+ echo ""
+
+ echo "===================================CLEAN UP COMMANDS========================================="
+ echo "> kubectl -n $namespace delete svc/inlets-lb"
+ echo "> kubectl -n $namespace delete pod/inlets-server"
+ echo "============================================================================================="
+
+ exit 0
+}
+if [[ "${BASH_SOURCE[0]}" = "$0" ]]; then
+
+ if [ "$1" == "-h" ] ; then
+ usage
+ fi
+
+ providerName=${1:-}
+ [[ -z $providerName ]] && echo "Please specify the provider name (aws, gcp, azure, etc.)!" && exit 1
+
+ namespace=${2:-}
+ [[ -z $namespace ]] && echo "Please specify the extension namespace!" && exit 1
+
+ webhookServerPort=${3:-}
+ [[ -z $webhookServerPort ]] && echo "Please specify webhook server port" && exit 1
+
+
+ trap 'cleanUP $namespace' SIGINT SIGTERM
+
+ while true; do
+ read -p "[STEP 0] Have you already set the \`ignoreResources\` chart value to \`true\` for your extension controller-registration?" yn
+ case $yn in
+ [Yy]* )
+ echo "[STEP 1] Checking Pre-reqs!"
+ checkPrereqs
+
+ echo "[STEP 2] Creating Inlets LB Service..!"
+ createInletsLB $namespace && sleep 2s
+
+ echo "[STEP 3] Waiting for Inlets LB Service to be created..!";
+ output=$(waitForInletsLBToBeReady $namespace $providerName)
+ loadbalancerIPOrHostName=$(echo "$output" | tail -n1)
+ echo "[Info] LB IP is $loadbalancerIPOrHostName"
+
+ echo "[STEP 4] Creating the server Pod for TLS Termination and Tunneling connection..!";
+ createServerPod $namespace $providerName
+
+ echo "[STEP 5] Waiting for Inlets Pod to be ready..!";
+ waitForInletsPodToBeReady $namespace
+
+ echo "[STEP 6] Creating WebhookSVC LB..!"
+ createOrUpdateWebhookSVC $namespace $providerName
+
+ echo "[STEP 7] Initializing the inlets client";
+ echo "[Info] Inlets initialized, you are ready to go ahead and run \"make EXTENSION_NAMESPACE=$namespace WEBHOOK_CONFIG_MODE=service start\""
+ echo "[Info] It will take about 5 seconds for the connection to succeeed!"
+
+ inlets client --remote ws://$loadbalancerIPOrHostName:8000 --upstream https://localhost:$webhookServerPort --token=21d809ed61915c9177fbceeaa87e307e766be5f2
+ ;;
+ [Nn]* ) echo "You need to set \`ignoreResources\` to true and generate the controller installlation first in your extension chart before proceeding!"; exit;;
+ * ) echo "Please answer yes or no.";;
+ esac
+done
+fi
diff --git a/vendor/github.com/gardener/gardener/hack/install-requirements.sh b/vendor/github.com/gardener/gardener/hack/install-requirements.sh
new file mode 100755
index 0000000..0b34709
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/install-requirements.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+echo "> Installing requirements"
+
+GO111MODULE=off go get golang.org/x/tools/cmd/goimports
+
+export GO111MODULE=on
+curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.27.0
+curl -s "https://raw.githubusercontent.com/helm/helm/v2.17.0/scripts/get" | bash -s -- --version 'v2.17.0'
+
+if [[ "$(uname -s)" == *"Darwin"* ]]; then
+ cat < Install"
+
+LD_FLAGS="${LD_FLAGS:-$($(dirname $0)/get-build-ld-flags.sh)}"
+
+CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on \
+ go install -mod=vendor -ldflags "$LD_FLAGS" \
+ $@
diff --git a/vendor/github.com/gardener/gardener/hack/setup-envtest.sh b/vendor/github.com/gardener/gardener/hack/setup-envtest.sh
new file mode 100755
index 0000000..e984688
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/setup-envtest.sh
@@ -0,0 +1,109 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This file was copied and modified from the kubernetes-sigs/controller-runtime project
+# https://github.com/kubernetes-sigs/controller-runtime/blob/a9bd9117a77a2f84bbc546e28991136fe0000dc0/hack/setup-envtest.sh
+#
+# Modifications copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# Turn colors in this script off by setting the NO_COLOR variable in your
+# environment to any value:
+#
+# $ NO_COLOR=1 test.sh
+NO_COLOR=${NO_COLOR:-""}
+if [ -z "$NO_COLOR" ]; then
+ header=$'\e[1;33m'
+ reset=$'\e[0m'
+else
+ header=''
+ reset=''
+fi
+
+function header_text {
+ echo "$header$*$reset"
+}
+
+function setup_envtest_env {
+ header_text "setting up env vars"
+
+ # Setup env vars
+ KUBEBUILDER_ASSETS=${KUBEBUILDER_ASSETS:-""}
+ if [[ -z "${KUBEBUILDER_ASSETS}" ]]; then
+ export KUBEBUILDER_ASSETS="$1/bin"
+ fi
+}
+
+# Fetch k8s API gen tools and make it available under KUBEBUILDER_ASSETS.
+#
+# Skip fetching and untaring the tools by setting the SKIP_FETCH_TOOLS variable
+# in your environment to any value:
+#
+# $ SKIP_FETCH_TOOLS=1 ./check-everything.sh
+#
+# If you skip fetching tools, this script will use the tools already on your
+# machine.
+function fetch_envtest_tools {
+ SKIP_FETCH_TOOLS=${SKIP_FETCH_TOOLS:-""}
+ if [ -n "$SKIP_FETCH_TOOLS" ]; then
+ return 0
+ fi
+
+ tmp_root=/tmp
+ envtest_root_dir=$tmp_root/envtest
+
+ k8s_version="${ENVTEST_K8S_VERSION:-1.17.9}"
+ goarch="$(go env GOARCH)"
+ goos="$(go env GOOS)"
+
+ if [[ "$goos" != "linux" && "$goos" != "darwin" ]]; then
+ echo "OS '$goos' not supported. Aborting." >&2
+ return 1
+ fi
+
+ local dest_dir="${1}"
+
+ # use the pre-existing version in the temporary folder if it matches our k8s version
+ if [[ -x "${dest_dir}/bin/kube-apiserver" ]]; then
+ version=$("${dest_dir}"/bin/kube-apiserver --version)
+ if [[ $version == *"${k8s_version}"* ]]; then
+ header_text "Using cached envtest tools from ${dest_dir}"
+ return 0
+ fi
+ fi
+
+ header_text "fetching envtest tools@${k8s_version} (into '${dest_dir}')"
+ envtest_tools_archive_name="kubebuilder-tools-$k8s_version-$goos-$goarch.tar.gz"
+ envtest_tools_download_url="https://storage.googleapis.com/kubebuilder-tools/$envtest_tools_archive_name"
+
+ envtest_tools_archive_path="$tmp_root/$envtest_tools_archive_name"
+ if [ ! -f $envtest_tools_archive_path ]; then
+ curl -sL ${envtest_tools_download_url} -o "$envtest_tools_archive_path"
+ fi
+
+ mkdir -p "${dest_dir}"
+ tar -C "${dest_dir}" --strip-components=1 -zvxf "$envtest_tools_archive_path"
+}
+
+bin_dir="$(git rev-parse --show-toplevel)/bin"
+kb_root_dir="$bin_dir/kubebuilder"
+
+mkdir -p "$kb_root_dir"
+fetch_envtest_tools "$kb_root_dir"
+setup_envtest_env "$kb_root_dir"
diff --git a/vendor/github.com/gardener/gardener/hack/test-cover-clean.sh b/vendor/github.com/gardener/gardener/hack/test-cover-clean.sh
new file mode 100755
index 0000000..7521f19
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/test-cover-clean.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -e
+
+echo "> Test Cover Clean"
+
+find . -name "*.coverprofile" -type f -delete
+rm -f test.coverage.html test.coverprofile
diff --git a/vendor/github.com/gardener/gardener/hack/test-cover.sh b/vendor/github.com/gardener/gardener/hack/test-cover.sh
new file mode 100755
index 0000000..5262ef3
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/test-cover.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -e
+
+source "$(dirname $0)/setup-envtest.sh"
+
+echo "> Test Cover"
+
+GO111MODULE=on ginkgo -cover -timeout=2m -race -mod=vendor $@
+
+REPO_ROOT="$(git rev-parse --show-toplevel)"
+COVERPROFILE="$REPO_ROOT/test.coverprofile"
+COVERPROFILE_TMP="$REPO_ROOT/test.coverprofile.tmp"
+COVERPROFILE_HTML="$REPO_ROOT/test.coverage.html"
+
+echo "mode: set" > "$COVERPROFILE_TMP"
+find . -name "*.coverprofile" -type f | xargs cat | grep -v mode: | sort -r | awk '{if($1 != last) {print $0;last=$1}}' >> "$COVERPROFILE_TMP"
+cat "$COVERPROFILE_TMP" | grep -vE "\.pb\.go|zz_generated" > "$COVERPROFILE"
+rm -rf "$COVERPROFILE_TMP"
+go tool cover -html="$COVERPROFILE" -o="$COVERPROFILE_HTML"
+
+go tool cover -func="$COVERPROFILE"
diff --git a/vendor/github.com/gardener/gardener/hack/test-prometheus.sh b/vendor/github.com/gardener/gardener/hack/test-prometheus.sh
new file mode 100755
index 0000000..50d65ad
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/test-prometheus.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -e
+
+echo "> Test Prometheus"
+
+echo "Executing Prometheus alert tests"
+pushd "$(dirname $0)/../charts/seed-monitoring/charts/core/charts/prometheus" > /dev/null
+promtool test rules rules-tests/*test.yaml
+popd > /dev/null
+
+echo "Executing aggregate Prometheus alert tests"
+pushd "$(dirname $0)/../charts/seed-bootstrap/aggregate-prometheus-rules-tests" > /dev/null
+promtool test rules *test.yaml
+popd > /dev/null
diff --git a/vendor/github.com/gardener/gardener/hack/test.sh b/vendor/github.com/gardener/gardener/hack/test.sh
new file mode 100755
index 0000000..f9f98d2
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/test.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -e
+
+source "$(dirname $0)/setup-envtest.sh"
+
+echo "> Test"
+
+GO111MODULE=on go test -race -mod=vendor $@ | grep -v 'no test files'
diff --git a/vendor/github.com/gardener/gardener/hack/tools.go b/vendor/github.com/gardener/gardener/hack/tools.go
new file mode 100644
index 0000000..1af0eac
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/tools.go
@@ -0,0 +1,29 @@
+// +build tools
+
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This package imports things required by build scripts, to force `go mod` to see them as dependencies
+package tools
+
+import (
+ _ "github.com/ahmetb/gen-crd-api-reference-docs"
+ _ "github.com/golang/mock/mockgen"
+ _ "github.com/onsi/ginkgo/ginkgo"
+ _ "golang.org/x/lint/golint"
+ _ "k8s.io/code-generator"
+ _ "k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo"
+ _ "k8s.io/kube-openapi/cmd/openapi-gen"
+ _ "sigs.k8s.io/controller-tools/cmd/controller-gen"
+)
diff --git a/vendor/github.com/gardener/gardener/hack/update-codegen.sh b/vendor/github.com/gardener/gardener/hack/update-codegen.sh
new file mode 100755
index 0000000..380f6b1
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/update-codegen.sh
@@ -0,0 +1,190 @@
+#!/bin/bash
+#
+# Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f ${GOPATH}/bin/*-gen
+
+CURRENT_DIR=$(dirname $0)
+PROJECT_ROOT="${CURRENT_DIR}"/..
+
+# core.gardener.cloud APIs
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ deepcopy,defaulter,client,lister,informer \
+ github.com/gardener/gardener/pkg/client/core \
+ github.com/gardener/gardener/pkg/apis \
+ github.com/gardener/gardener/pkg/apis \
+ "core:v1alpha1,v1beta1" \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ conversion \
+ github.com/gardener/gardener/pkg/client/core \
+ github.com/gardener/gardener/pkg/apis \
+ github.com/gardener/gardener/pkg/apis \
+ "core:v1alpha1,v1beta1" \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+# extensions.gardener.cloud APIs
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-groups.sh \
+ "deepcopy,client,informer,lister" \
+ github.com/gardener/gardener/pkg/client/extensions \
+ github.com/gardener/gardener/pkg/apis \
+ "extensions:v1alpha1" \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+# settings.gardener.cloud APIs
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-groups.sh \
+ "all" \
+ github.com/gardener/gardener/pkg/client/settings \
+ github.com/gardener/gardener/pkg/apis \
+ "settings:v1alpha1" \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ "deepcopy,defaulter,conversion" \
+ github.com/gardener/gardener/pkg/client/settings \
+ github.com/gardener/gardener/pkg/apis \
+ github.com/gardener/gardener/pkg/apis \
+ "settings:v1alpha1" \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+# Componentconfig for controller-manager
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ deepcopy,defaulter \
+ github.com/gardener/gardener/pkg/client/componentconfig \
+ github.com/gardener/gardener/pkg/controllermanager/apis \
+ github.com/gardener/gardener/pkg/controllermanager/apis \
+ "config:v1alpha1" \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ conversion \
+ github.com/gardener/gardener/pkg/client/componentconfig \
+ github.com/gardener/gardener/pkg/controllermanager/apis \
+ github.com/gardener/gardener/pkg/controllermanager/apis \
+ "config:v1alpha1" \
+ --extra-peer-dirs=github.com/gardener/gardener/pkg/controllermanager/apis/config,github.com/gardener/gardener/pkg/controllermanager/apis/config/v1alpha1,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/conversion,k8s.io/apimachinery/pkg/runtime,k8s.io/component-base/config,k8s.io/component-base/config/v1alpha1 \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+# Componentconfig for admission controller
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ deepcopy,defaulter \
+ github.com/gardener/gardener/pkg/client/admissioncontrollerconfig \
+ github.com/gardener/gardener/pkg/admissioncontroller/apis \
+ github.com/gardener/gardener/pkg/admissioncontroller/apis \
+ "config:v1alpha1" \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ conversion \
+ github.com/gardener/gardener/pkg/client/admissioncontrollerconfig \
+ github.com/gardener/gardener/pkg/admissioncontroller/apis \
+ github.com/gardener/gardener/pkg/admissioncontroller/apis \
+ "config:v1alpha1" \
+ --extra-peer-dirs=github.com/gardener/gardener/pkg/admissioncontroller/apis/config,github.com/gardener/gardener/pkg/admissioncontroller/apis/config/v1alpha1,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/conversion,k8s.io/apimachinery/pkg/runtime,k8s.io/component-base/config,k8s.io/component-base/config/v1alpha1 \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+# Configuration for gardener scheduler
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ deepcopy,defaulter \
+ github.com/gardener/gardener/pkg/scheduler/client \
+ github.com/gardener/gardener/pkg/scheduler/apis \
+ github.com/gardener/gardener/pkg/scheduler/apis \
+ "config:v1alpha1" \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ conversion \
+ github.com/gardener/gardener/pkg/scheduler/client \
+ github.com/gardener/gardener/pkg/scheduler/apis \
+ github.com/gardener/gardener/pkg/scheduler/apis \
+ "config:v1alpha1" \
+ --extra-peer-dirs=github.com/gardener/gardener/pkg/scheduler/apis/config,github.com/gardener/gardener/pkg/scheduler/apis/config/v1alpha1,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/conversion,k8s.io/apimachinery/pkg/runtime,k8s.io/component-base/config,k8s.io/component-base/config/v1alpha1 \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+# Componentconfig for gardenlet
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ deepcopy,defaulter \
+ github.com/gardener/gardener/pkg/client/componentconfig \
+ github.com/gardener/gardener/pkg/gardenlet/apis \
+ github.com/gardener/gardener/pkg/gardenlet/apis \
+ "config:v1alpha1" \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ conversion \
+ github.com/gardener/gardener/pkg/client/componentconfig \
+ github.com/gardener/gardener/pkg/gardenlet/apis \
+ github.com/gardener/gardener/pkg/gardenlet/apis \
+ "config:v1alpha1" \
+ --extra-peer-dirs=github.com/gardener/gardener/pkg/gardenlet/apis/config,github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/conversion,k8s.io/apimachinery/pkg/runtime,k8s.io/component-base/config,k8s.io/component-base/config/v1alpha1 \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+# Componentconfig for admission plugins
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ deepcopy,defaulter \
+ github.com/gardener/gardener/pkg/client/componentconfig \
+ github.com/gardener/gardener/plugin/pkg/shoot/tolerationrestriction/apis \
+ github.com/gardener/gardener/plugin/pkg/shoot/tolerationrestriction/apis \
+ "shoottolerationrestriction:v1alpha1" \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
+ conversion \
+ github.com/gardener/gardener/pkg/client/componentconfig \
+ github.com/gardener/gardener/plugin/pkg/shoot/tolerationrestriction/apis \
+ github.com/gardener/gardener/plugin/pkg/shoot/tolerationrestriction/apis \
+ "shoottolerationrestriction:v1alpha1" \
+ --extra-peer-dirs=github.com/gardener/gardener/plugin/pkg/shoot/tolerationrestriction/apis/shoottolerationrestriction,github.com/gardener/gardener/plugin/pkg/shoot/tolerationrestriction/apis/shoottolerationrestriction/v1alpha1,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/conversion,k8s.io/apimachinery/pkg/runtime,k8s.io/component-base/config,k8s.io/component-base/config/v1alpha1 \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+# OpenAPI definitions
+
+echo "Generating openapi definitions"
+rm -Rf ./${PROJECT_ROOT}/openapi/openapi_generated.go
+go install ./${PROJECT_ROOT}/vendor/k8s.io/kube-openapi/cmd/openapi-gen
+${GOPATH}/bin/openapi-gen "$@" \
+ --v 1 \
+ --logtostderr \
+ --input-dirs=github.com/gardener/gardener/pkg/apis/core/v1alpha1 \
+ --input-dirs=github.com/gardener/gardener/pkg/apis/core/v1beta1 \
+ --input-dirs=github.com/gardener/gardener/pkg/apis/settings/v1alpha1 \
+ --input-dirs=k8s.io/api/core/v1 \
+ --input-dirs=k8s.io/api/rbac/v1 \
+ --input-dirs=k8s.io/api/autoscaling/v1 \
+ --input-dirs=k8s.io/apimachinery/pkg/apis/meta/v1 \
+ --input-dirs=k8s.io/apimachinery/pkg/api/resource \
+ --input-dirs=k8s.io/apimachinery/pkg/types \
+ --input-dirs=k8s.io/apimachinery/pkg/version \
+ --input-dirs=k8s.io/apimachinery/pkg/runtime \
+ --input-dirs=k8s.io/apimachinery/pkg/util/intstr \
+ --report-filename=${PROJECT_ROOT}/pkg/openapi/api_violations.report \
+ --output-package=github.com/gardener/gardener/pkg/openapi \
+ -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt"
+
+echo
+echo "NOTE: If you changed the API then consider updating the example manifests."
diff --git a/vendor/github.com/gardener/gardener/hack/update-protobuf.sh b/vendor/github.com/gardener/gardener/hack/update-protobuf.sh
new file mode 100755
index 0000000..7da93e7
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/hack/update-protobuf.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+# Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+CURRENT_DIR="$(dirname $0)"
+PROJECT_ROOT="${CURRENT_DIR}"/..
+if [ "${PROJECT_ROOT#/}" == "${PROJECT_ROOT}" ]; then
+ PROJECT_ROOT="./$PROJECT_ROOT"
+fi
+
+pushd "$PROJECT_ROOT" > /dev/null
+APIROOTS=${APIROOTS:-$(git grep --files-with-matches -e '// +k8s:protobuf-gen=package' cmd pkg | \
+ xargs -n 1 dirname | \
+ sed 's,^,github.com/gardener/gardener/,;' | \
+ sort | uniq
+)}
+popd > /dev/null
+
+rm -f ${GOPATH}/bin/go-to-protobuf
+rm -f ${GOPATH}/bin/protoc-gen-gogo
+
+GOFLAGS="" go build -o ${GOPATH}/bin "$PROJECT_ROOT/vendor/k8s.io/code-generator/cmd/go-to-protobuf"
+GOFLAGS="" go build -o ${GOPATH}/bin "$PROJECT_ROOT/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo"
+
+if [[ -z "$(which protoc)" || "$(protoc --version)" != "libprotoc 3."* ]]; then
+ if [[ "$(uname -s)" == *"Darwin"* ]]; then
+ brew install protobuf
+ else
+ PROTOC_ZIP=protoc-3.7.1-linux-x86_64.zip
+ curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/$PROTOC_ZIP
+ unzip -o $PROTOC_ZIP -d /usr/local bin/protoc
+ unzip -o $PROTOC_ZIP -d /usr/local 'include/*'
+ rm -f $PROTOC_ZIP
+ fi
+
+ echo "WARNING: Protobuf changes are not being validated"
+fi
+
+read -ra PACKAGES <<< $(echo ${APIROOTS})
+
+# requires the 'proto' tag to build (will remove when ready)
+# searches for the protoc-gen-gogo extension in the output directory
+# satisfies import of github.com/gogo/protobuf/gogoproto/gogo.proto and the
+# core Google protobuf types
+go-to-protobuf \
+ --packages="$(IFS=, ; echo "${PACKAGES[*]}")" \
+ --apimachinery-packages='-k8s.io/apimachinery/pkg/util/intstr,-k8s.io/apimachinery/pkg/api/resource,-k8s.io/apimachinery/pkg/runtime/schema,-k8s.io/apimachinery/pkg/runtime,-k8s.io/apimachinery/pkg/apis/meta/v1,-k8s.io/apimachinery/pkg/apis/meta/v1beta1,-k8s.io/api/core/v1,-k8s.io/api/rbac/v1,-k8s.io/api/autoscaling/v1' \
+ --go-header-file=${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt \
+ --proto-import=${PROJECT_ROOT}/vendor
diff --git a/vendor/github.com/gardener/gardener/pkg/api/core/accessor.go b/vendor/github.com/gardener/gardener/pkg/api/core/accessor.go
new file mode 100644
index 0000000..85cbcc7
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/api/core/accessor.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ "fmt"
+
+ gardencore "github.com/gardener/gardener/pkg/apis/core"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// Accessor tries to create a `gardencore.Object` from the given runtime.Object.
+//
+// If the given object already implements Object, it is returned as-is.
+// Otherwise, an error with the type of the object is returned.
+func Accessor(obj runtime.Object) (gardencore.Object, error) {
+ switch v := obj.(type) {
+ case gardencore.Object:
+ return v, nil
+ default:
+ return nil, fmt.Errorf("value of type %T does not implement Object", obj)
+ }
+}
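For reference, a minimal usage sketch of the accessor above (illustrative only, not part of the vendored file); it assumes *gardencore.CloudProfile satisfies gardencore.Object, as its GetProviderType method further down in this patch suggests:

package main

import (
	"fmt"

	api "github.com/gardener/gardener/pkg/api/core"
	gardencore "github.com/gardener/gardener/pkg/apis/core"
)

func main() {
	// A typed object that already implements gardencore.Object is returned as-is.
	profile := &gardencore.CloudProfile{
		Spec: gardencore.CloudProfileSpec{Type: "aws"},
	}

	obj, err := api.Accessor(profile)
	if err != nil {
		panic(err)
	}
	fmt.Println(obj.GetProviderType()) // prints "aws"
}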
diff --git a/vendor/github.com/gardener/gardener/pkg/api/extensions/accessor.go b/vendor/github.com/gardener/gardener/pkg/api/extensions/accessor.go
new file mode 100644
index 0000000..5c15231
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/api/extensions/accessor.go
@@ -0,0 +1,263 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package extensions
+
+import (
+ "fmt"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// Accessor tries to create an extensionsv1alpha1.Object from the given runtime.Object.
+//
+// If the given object already implements Object, it is returned as-is.
+// If the object is unstructured, an unstructured accessor is returned that retrieves values
+// on a best-effort basis.
+// Otherwise, an error with the type of the object is returned.
+func Accessor(obj runtime.Object) (extensionsv1alpha1.Object, error) {
+ switch v := obj.(type) {
+ case extensionsv1alpha1.Object:
+ return v, nil
+ case *unstructured.Unstructured:
+ return UnstructuredAccessor(v), nil
+ default:
+ return nil, fmt.Errorf("value of type %T does not implement Object", obj)
+ }
+}
+
+// UnstructuredAccessor is an Object that retrieves values on a best-effort basis.
+// If values don't exist, it usually returns their zero value.
+func UnstructuredAccessor(u *unstructured.Unstructured) extensionsv1alpha1.Object {
+ return unstructuredAccessor{u}
+}
+
+type unstructuredAccessor struct {
+ *unstructured.Unstructured
+}
+
+type unstructuredSpecAccessor struct {
+ *unstructured.Unstructured
+}
+
+type unstructuredStatusAccessor struct {
+ *unstructured.Unstructured
+}
+
+func nestedString(obj map[string]interface{}, fields ...string) string {
+ v, ok, err := unstructured.NestedString(obj, fields...)
+ if err != nil || !ok {
+ return ""
+ }
+ return v
+}
+
+func nestedInt64(obj map[string]interface{}, fields ...string) int64 {
+ v, ok, err := unstructured.NestedInt64(obj, fields...)
+ if err != nil || !ok {
+ return 0
+ }
+ return v
+}
+
+func nestedStringReference(obj map[string]interface{}, fields ...string) *string {
+ v, ok, err := unstructured.NestedString(obj, fields...)
+ if err != nil || !ok {
+ return nil
+ }
+
+ return &v
+}
+
+func nestedRawExtension(obj map[string]interface{}, fields ...string) *runtime.RawExtension {
+ val, ok, err := unstructured.NestedFieldNoCopy(obj, fields...)
+ if err != nil || !ok {
+ return nil
+ }
+
+ data, ok := val.(map[string]interface{})
+ if !ok {
+ return nil
+ }
+
+ rawExtension := &runtime.RawExtension{}
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(data, rawExtension); err != nil {
+ return nil
+ }
+
+ return rawExtension
+}
+
+// GetExtensionSpec implements Object.
+func (u unstructuredAccessor) GetExtensionSpec() extensionsv1alpha1.Spec {
+ return unstructuredSpecAccessor(u)
+}
+
+// GetExtensionType implements Spec.
+func (u unstructuredSpecAccessor) GetExtensionType() string {
+ return nestedString(u.UnstructuredContent(), "spec", "type")
+}
+
+// GetExtensionPurpose implements Spec.
+func (u unstructuredSpecAccessor) GetExtensionPurpose() *string {
+ return nestedStringReference(u.UnstructuredContent(), "spec", "purpose")
+}
+
+// GetProviderConfig implements Spec.
+func (u unstructuredSpecAccessor) GetProviderConfig() *runtime.RawExtension {
+ return nestedRawExtension(u.UnstructuredContent(), "spec", "providerConfig")
+}
+
+// GetExtensionStatus implements Object.
+func (u unstructuredAccessor) GetExtensionStatus() extensionsv1alpha1.Status {
+ return unstructuredStatusAccessor(u)
+}
+
+// GetProviderStatus implements Status.
+func (u unstructuredStatusAccessor) GetProviderStatus() *runtime.RawExtension {
+ return nestedRawExtension(u.UnstructuredContent(), "status", "providerStatus")
+}
+
+// GetLastOperation implements Status.
+func (u unstructuredStatusAccessor) GetLastOperation() *gardencorev1beta1.LastOperation {
+ val, ok, err := unstructured.NestedFieldNoCopy(u.UnstructuredContent(), "status", "lastOperation")
+ if err != nil || !ok {
+ return nil
+ }
+
+ lastOperation := &gardencorev1beta1.LastOperation{}
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(val.(map[string]interface{}), lastOperation); err != nil {
+ return nil
+ }
+ return lastOperation
+}
+
+// GetLastError implements Status.
+func (u unstructuredStatusAccessor) GetLastError() *gardencorev1beta1.LastError {
+ val, ok, err := unstructured.NestedFieldNoCopy(u.UnstructuredContent(), "status", "lastError")
+ if err != nil || !ok {
+ return nil
+ }
+
+ lastError := &gardencorev1beta1.LastError{}
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(val.(map[string]interface{}), lastError); err != nil {
+ return nil
+ }
+ return lastError
+}
+
+// GetObservedGeneration implements Status.
+func (u unstructuredStatusAccessor) GetObservedGeneration() int64 {
+ return nestedInt64(u.Object, "status", "observedGeneration")
+}
+
+// GetState implements Status.
+func (u unstructuredStatusAccessor) GetState() *runtime.RawExtension {
+ val, ok, err := unstructured.NestedFieldNoCopy(u.UnstructuredContent(), "status", "state")
+ if err != nil || !ok {
+ return nil
+ }
+ raw := &runtime.RawExtension{}
+ err = runtime.DefaultUnstructuredConverter.FromUnstructured(val.(map[string]interface{}), raw)
+ if err != nil {
+ return nil
+ }
+ return raw
+}
+
+// SetState implements Status.
+func (u unstructuredStatusAccessor) SetState(state *runtime.RawExtension) {
+ unstrc, err := runtime.DefaultUnstructuredConverter.ToUnstructured(state)
+ if err != nil {
+ return
+ }
+
+ if err := unstructured.SetNestedField(u.UnstructuredContent(), unstrc, "status", "state"); err != nil {
+ return
+ }
+}
+
+// GetConditions implements Status.
+func (u unstructuredStatusAccessor) GetConditions() []gardencorev1beta1.Condition {
+ val, ok, err := unstructured.NestedFieldNoCopy(u.UnstructuredContent(), "status", "conditions")
+ if err != nil || !ok {
+ return nil
+ }
+ var conditions []gardencorev1beta1.Condition
+ interfaceConditionSlice := val.([]interface{})
+ for _, interfaceCondition := range interfaceConditionSlice {
+ new := interfaceCondition.(map[string]interface{})
+ condition := &gardencorev1beta1.Condition{}
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(new, condition); err != nil {
+ return nil
+ }
+ conditions = append(conditions, *condition)
+ }
+ return conditions
+}
+
+// SetConditions implements Status.
+func (u unstructuredStatusAccessor) SetConditions(conditions []gardencorev1beta1.Condition) {
+ var interfaceSlice = make([]interface{}, len(conditions))
+ for i, d := range conditions {
+ unstrc, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&d)
+ if err != nil {
+ return
+ }
+ interfaceSlice[i] = unstrc
+ }
+ err := unstructured.SetNestedSlice(u.UnstructuredContent(), interfaceSlice, "status", "conditions")
+ if err != nil {
+ return
+ }
+}
+
+// GetResources implements Status.
+func (u unstructuredStatusAccessor) GetResources() []gardencorev1beta1.NamedResourceReference {
+ val, ok, err := unstructured.NestedFieldNoCopy(u.UnstructuredContent(), "status", "resources")
+ if err != nil || !ok {
+ return nil
+ }
+ var resources []gardencorev1beta1.NamedResourceReference
+ interfaceResourceSlice := val.([]interface{})
+ for _, interfaceResource := range interfaceResourceSlice {
+ new := interfaceResource.(map[string]interface{})
+ resource := &gardencorev1beta1.NamedResourceReference{}
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(new, resource); err != nil {
+ return nil
+ }
+ resources = append(resources, *resource)
+ }
+ return resources
+}
+
+// SetResources implements Status.
+func (u unstructuredStatusAccessor) SetResources(namedResourceReference []gardencorev1beta1.NamedResourceReference) {
+ var interfaceSlice = make([]interface{}, len(namedResourceReference))
+ for i, d := range namedResourceReference {
+ unstrc, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&d)
+ if err != nil {
+ return
+ }
+ interfaceSlice[i] = unstrc
+ }
+ err := unstructured.SetNestedSlice(u.UnstructuredContent(), interfaceSlice, "status", "resources")
+ if err != nil {
+ return
+ }
+}
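A rough usage sketch for the unstructured accessor above (illustrative only; the unstructured content shown is an assumed example, not taken from this patch):

package main

import (
	"fmt"

	"github.com/gardener/gardener/pkg/api/extensions"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "extensions.gardener.cloud/v1alpha1",
		"kind":       "Infrastructure",
		"spec":       map[string]interface{}{"type": "aws"},
	}}

	obj, err := extensions.Accessor(u)
	if err != nil {
		panic(err)
	}

	// Fields are read best-effort; missing values come back as zero values.
	fmt.Println(obj.GetExtensionSpec().GetExtensionType())        // "aws"
	fmt.Println(obj.GetExtensionStatus().GetObservedGeneration()) // 0
}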
diff --git a/vendor/github.com/gardener/gardener/pkg/api/extensions/utils.go b/vendor/github.com/gardener/gardener/pkg/api/extensions/utils.go
new file mode 100644
index 0000000..c877f3d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/api/extensions/utils.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package extensions
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+)
+
+// GetShootNamespacedCRsLists returns an empty CR list struct for each CR used for Shoot management.
+func GetShootNamespacedCRsLists() []client.ObjectList {
+ return []client.ObjectList{
+ // The ControlPlane CR is now handled as a shoot component
+ // &extensionsv1alpha1.ControlPlaneList{},
+ // The Extension CR is now handled as a shoot component
+ // &extensionsv1alpha1.ExtensionList{},
+ // The Infrastructure CR is now handled as a shoot component
+ // &extensionsv1alpha1.InfrastructureList{},
+ // The Network CR is now handled as a shoot component
+ // &extensionsv1alpha1.NetworkList{},
+ &extensionsv1alpha1.OperatingSystemConfigList{},
+ // The Worker CR is now handled as a shoot component
+ // &extensionsv1alpha1.WorkerList{},
+ // The ContainerRuntime CR is now handled as a shoot component
+ // &extensionsv1alpha1.ContainerRuntimeList{},
+ }
+}
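A short sketch of how a caller might iterate over these list structs with a controller-runtime client (illustrative; listShootCRs and the client wiring are assumed, not part of this patch):

package sketch

import (
	"context"

	"github.com/gardener/gardener/pkg/api/extensions"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listShootCRs lists every shoot-managed CR kind returned by
// GetShootNamespacedCRsLists in the given (shoot) namespace.
func listShootCRs(ctx context.Context, c client.Client, namespace string) error {
	for _, list := range extensions.GetShootNamespacedCRsLists() {
		if err := c.List(ctx, list, client.InNamespace(namespace)); err != nil {
			return err
		}
	}
	return nil
}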
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/core/doc.go
new file mode 100644
index 0000000..effda8b
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/doc.go
@@ -0,0 +1,19 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +k8s:deepcopy-gen=package
+
+// Package core is the internal version of the API.
+// +groupName=core.gardener.cloud
+package core
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/field_constants.go b/vendor/github.com/gardener/gardener/pkg/apis/core/field_constants.go
new file mode 100644
index 0000000..a951d77
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/field_constants.go
@@ -0,0 +1,44 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+// Field path constants that are specific to the internal API
+// representation.
+const (
+ // BackupBucketSeedName is the field selector path for finding
+ // the Seed cluster of a core.gardener.cloud/v1beta1 BackupBucket.
+ BackupBucketSeedName = "spec.seedName"
+ // BackupEntrySeedName is the field selector path for finding
+ // the Seed cluster of a core.gardener.cloud/v1beta1 BackupEntry.
+ BackupEntrySeedName = "spec.seedName"
+
+ // RegistrationRefName is the field selector path for finding
+ // the ControllerRegistration name of a core.gardener.cloud/{v1alpha1,v1beta1} ControllerInstallation.
+ RegistrationRefName = "spec.registrationRef.name"
+ // SeedRefName is the field selector path for finding
+ // the Seed name of a core.gardener.cloud/{v1alpha1,v1beta1} ControllerInstallation.
+ SeedRefName = "spec.seedRef.name"
+
+ // ShootCloudProfileName is the field selector path for finding
+ // the CloudProfile name of a core.gardener.cloud/{v1alpha1,v1beta1} Shoot.
+ ShootCloudProfileName = "spec.cloudProfileName"
+ // ShootSeedName is the field selector path for finding
+ // the Seed cluster of a core.gardener.cloud/{v1alpha1,v1beta1} Shoot.
+ ShootSeedName = "spec.seedName"
+ // ShootStatusSeedName is the field selector path for finding
+ // the Seed cluster of a core.gardener.cloud/{v1alpha1,v1beta1} Shoot
+ // referred in the status.
+ ShootStatusSeedName = "status.seedName"
+)
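These constants are typically fed into field selectors; a hedged sketch follows (the helper name is made up for illustration):

package sketch

import (
	"github.com/gardener/gardener/pkg/apis/core"
	"k8s.io/apimachinery/pkg/fields"
)

// shootsOnSeedSelector builds a field selector matching Shoots whose
// spec.seedName equals the given seed name.
func shootsOnSeedSelector(seedName string) fields.Selector {
	return fields.OneTermEqualSelector(core.ShootSeedName, seedName)
}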
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/install/install.go b/vendor/github.com/gardener/gardener/pkg/apis/core/install/install.go
new file mode 100644
index 0000000..6b72154
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/install/install.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package install
+
+import (
+ "github.com/gardener/gardener/pkg/apis/core"
+ "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// Install registers the API group and adds types to a scheme.
+func Install(scheme *runtime.Scheme) {
+ utilruntime.Must(core.AddToScheme(scheme))
+ utilruntime.Must(v1alpha1.AddToScheme(scheme))
+ utilruntime.Must(v1beta1.AddToScheme(scheme))
+
+ utilruntime.Must(scheme.SetVersionPriority(v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion))
+}
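A minimal sketch of consuming the install package to build a scheme and codec factory (illustrative; newGardenScheme is an assumed helper):

package sketch

import (
	gardencoreinstall "github.com/gardener/gardener/pkg/apis/core/install"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// newGardenScheme registers the internal, v1alpha1, and v1beta1 core types and
// returns a scheme plus a codec factory; v1beta1 is preferred when encoding,
// per the SetVersionPriority call above.
func newGardenScheme() (*runtime.Scheme, serializer.CodecFactory) {
	scheme := runtime.NewScheme()
	gardencoreinstall.Install(scheme)
	return scheme, serializer.NewCodecFactory(scheme)
}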
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/register.go b/vendor/github.com/gardener/gardener/pkg/apis/core/register.go
new file mode 100644
index 0000000..63db019
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/register.go
@@ -0,0 +1,74 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the name of the core API group.
+const GroupName = "core.gardener.cloud"
+
+// SchemeGroupVersion is group version used to register these objects.
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind.
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource.
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is a new Scheme Builder which registers our API.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // AddToScheme is a reference to the Scheme Builder's AddToScheme function.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &BackupBucket{},
+ &BackupBucketList{},
+ &BackupEntry{},
+ &BackupEntryList{},
+ &CloudProfile{},
+ &CloudProfileList{},
+ &ControllerRegistration{},
+ &ControllerRegistrationList{},
+ &ControllerInstallation{},
+ &ControllerInstallationList{},
+ &Plant{},
+ &PlantList{},
+ &Project{},
+ &ProjectList{},
+ &Quota{},
+ &QuotaList{},
+ &SecretBinding{},
+ &SecretBindingList{},
+ &Seed{},
+ &SeedList{},
+ &ShootState{},
+ &ShootStateList{},
+ &Shoot{},
+ &ShootList{},
+ )
+ return nil
+}
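Kind and Resource are small helpers for building group-qualified names; a sketch of a typical use (illustrative; shootNotFound is an assumed helper):

package sketch

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"

	"github.com/gardener/gardener/pkg/apis/core"
)

// shootNotFound wraps a name into a group-qualified NotFound error for the
// core.gardener.cloud "shoots" resource.
func shootNotFound(name string) error {
	return apierrors.NewNotFound(core.Resource("shoots"), name)
}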
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types.go
new file mode 100644
index 0000000..abdcad0
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+const (
+ // GardenerSeedLeaseNamespace is the namespace in which Gardenlet will report Seeds'
+ // status using Lease resources for each Seed
+ GardenerSeedLeaseNamespace = "gardener-system-seed-lease"
+)
+
+// Object is a core object resource.
+type Object interface {
+ metav1.Object
+ // GetProviderType gets the type of the provider.
+ GetProviderType() string
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupbucket.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupbucket.go
new file mode 100644
index 0000000..a8d65be
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupbucket.go
@@ -0,0 +1,83 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupBucket holds details about backup bucket
+type BackupBucket struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // Specification of the Backup Bucket.
+ Spec BackupBucketSpec
+ // Most recently observed status of the Backup Bucket.
+ Status BackupBucketStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupBucketList is a list of BackupBucket objects.
+type BackupBucketList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of BackupBucket.
+ Items []BackupBucket
+}
+
+// BackupBucketSpec is the specification of a Backup Bucket.
+type BackupBucketSpec struct {
+ // Provider holds the details of cloud provider of the object store.
+ Provider BackupBucketProvider
+ // ProviderConfig is the configuration passed to the BackupBucket resource.
+ ProviderConfig *runtime.RawExtension
+ // SecretRef is a reference to a secret that contains the credentials to access object store.
+ SecretRef corev1.SecretReference
+ // SeedName holds the name of the seed allocated to the BackupBucket for running the controller.
+ SeedName *string
+}
+
+// BackupBucketStatus holds the most recently observed status of the Backup Bucket.
+type BackupBucketStatus struct {
+ // ProviderStatus is the configuration passed to the BackupBucket resource.
+ ProviderStatus *runtime.RawExtension
+ // LastOperation holds information about the last operation on the BackupBucket.
+ LastOperation *LastOperation
+ // LastError holds information about the last occurred error during an operation.
+ LastError *LastError
+ // ObservedGeneration is the most recent generation observed for this BackupBucket. It corresponds to the
+ // BackupBucket's generation, which is updated on mutation by the API Server.
+ ObservedGeneration int64
+ // GeneratedSecretRef is a reference to the secret generated by the backup bucket, which
+ // will have object-store-specific credentials.
+ GeneratedSecretRef *corev1.SecretReference
+}
+
+// BackupBucketProvider holds the details of cloud provider of the object store.
+type BackupBucketProvider struct {
+ // Type is the type of provider.
+ Type string
+ // Region is the region of the bucket.
+ Region string
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupentry.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupentry.go
new file mode 100644
index 0000000..2873347
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupentry.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ // BackupEntryForceDeletion is a constant for an annotation on a BackupEntry indicating that it should be force deleted.
+ BackupEntryForceDeletion = "backupentry.core.gardener.cloud/force-deletion"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupEntry holds details about shoot backup.
+type BackupEntry struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // Spec contains the specification of the Backup Entry.
+ Spec BackupEntrySpec
+ // Status contains the most recently observed status of the Backup Entry.
+ Status BackupEntryStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupEntryList is a list of BackupEntry objects.
+type BackupEntryList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of BackupEntry.
+ Items []BackupEntry
+}
+
+// BackupEntrySpec is the specification of a Backup Entry.
+type BackupEntrySpec struct {
+ // BucketName is the name of backup bucket for this Backup Entry.
+ BucketName string
+ // SeedName holds the name of the seed allocated to the BackupEntry for running the controller.
+ SeedName *string
+}
+
+// BackupEntryStatus holds the most recently observed status of the Backup Entry.
+type BackupEntryStatus struct {
+ // LastOperation holds information about the last operation on the BackupEntry.
+ LastOperation *LastOperation
+ // LastError holds information about the last occurred error during an operation.
+ LastError *LastError
+ // ObservedGeneration is the most recent generation observed for this BackupEntry. It corresponds to the
+ // BackupEntry's generation, which is updated on mutation by the API Server.
+ ObservedGeneration int64
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go
new file mode 100644
index 0000000..184b363
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go
@@ -0,0 +1,198 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CloudProfile represents certain properties about a provider environment.
+type CloudProfile struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // Spec defines the provider environment properties.
+ Spec CloudProfileSpec
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CloudProfileList is a collection of CloudProfiles.
+type CloudProfileList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of CloudProfiles.
+ Items []CloudProfile
+}
+
+// CloudProfileSpec is the specification of a CloudProfile.
+type CloudProfileSpec struct {
+ // CABundle is a certificate bundle which will be installed onto every host machine of shoot cluster targeting this profile.
+ CABundle *string
+ // Kubernetes contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification.
+ Kubernetes KubernetesSettings
+ // MachineImages contains constraints regarding allowed values for machine images in the Shoot specification.
+ MachineImages []MachineImage
+ // MachineTypes contains constraints regarding allowed values for machine types in the 'workers' block in the Shoot specification.
+ MachineTypes []MachineType
+ // ProviderConfig contains provider-specific configuration for the profile.
+ ProviderConfig *runtime.RawExtension
+ // Regions contains constraints regarding allowed values for regions and zones.
+ Regions []Region
+ // SeedSelector contains an optional list of labels on `Seed` resources that marks those seeds whose shoots may use this provider profile.
+ // An empty list means that all seeds of the same provider type are supported.
+ // This is useful for environments that are of the same type (like openstack) but may have different "instances"/landscapes.
+ // Optionally a list of possible providers can be added to enable cross-provider scheduling. By default, the provider
+ // type of the seed must match the shoot's provider.
+ SeedSelector *SeedSelector
+ // Type is the name of the provider.
+ Type string
+ // VolumeTypes contains constraints regarding allowed values for volume types in the 'workers' block in the Shoot specification.
+ VolumeTypes []VolumeType
+}
+
+// GetProviderType gets the type of the provider.
+func (c *CloudProfile) GetProviderType() string {
+ return c.Spec.Type
+}
+
+// SeedSelector contains constraints for selecting seeds that are usable for shoots using a profile
+type SeedSelector struct {
+ // LabelSelector is optional and can be used to select seeds by their label settings
+ *metav1.LabelSelector
+ // ProviderTypes contains a list of allowed provider types used by the Gardener scheduler to restrict seeds by
+ // their provider type and to enable cross-provider scheduling.
+ // By default, Shoots are only scheduled on Seeds having the same provider type.
+ ProviderTypes []string
+}
+
+// KubernetesSettings contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification.
+type KubernetesSettings struct {
+ // Versions is the list of allowed Kubernetes versions with optional expiration dates for Shoot clusters.
+ Versions []ExpirableVersion
+}
+
+// MachineImage defines the name and multiple versions of the machine image in any environment.
+type MachineImage struct {
+ // Name is the name of the image.
+ Name string
+ // Versions contains versions, expiration dates and container runtimes of the machine image
+ Versions []MachineImageVersion
+}
+
+// MachineImageVersion is an expirable version with a list of supported container runtimes and interfaces
+type MachineImageVersion struct {
+ ExpirableVersion
+ // CRI is the list of container runtimes and interfaces supported by this version
+ CRI []CRI
+}
+
+// ExpirableVersion contains a version and an expiration date.
+type ExpirableVersion struct {
+ // Version is the version identifier.
+ Version string
+ // ExpirationDate defines the time at which this version expires.
+ ExpirationDate *metav1.Time
+ // Classification defines the state of a version (preview, supported, deprecated)
+ Classification *VersionClassification
+}
+
+// MachineType contains certain properties of a machine type.
+type MachineType struct {
+ // CPU is the number of CPUs for this machine type.
+ CPU resource.Quantity
+ // GPU is the number of GPUs for this machine type.
+ GPU resource.Quantity
+ // Memory is the amount of memory for this machine type.
+ Memory resource.Quantity
+ // Name is the name of the machine type.
+ Name string
+ // Storage is the amount of storage associated with the root volume of this machine type.
+ Storage *MachineTypeStorage
+ // Usable defines if the machine type can be used for shoot clusters.
+ Usable *bool
+}
+
+// MachineTypeStorage is the amount of storage associated with the root volume of this machine type.
+type MachineTypeStorage struct {
+ // Class is the class of the storage type.
+ Class string
+ // StorageSize is the storage size.
+ StorageSize resource.Quantity
+ // Type is the type of the storage.
+ Type string
+}
+
+// Region contains certain properties of a region.
+type Region struct {
+ // Name is a region name.
+ Name string
+ // Zones is a list of availability zones in this region.
+ Zones []AvailabilityZone
+ // Labels is an optional set of key-value pairs that contain certain administrator-controlled labels for this region.
+ // It can be used by Gardener administrators/operators to provide additional information about a region, e.g. wrt
+ // quality, reliability, access restrictions, etc.
+ Labels map[string]string
+}
+
+// AvailabilityZone is an availability zone.
+type AvailabilityZone struct {
+ // Name is an availability zone name.
+ Name string
+ // UnavailableMachineTypes is a list of machine type names that are not available in this zone.
+ UnavailableMachineTypes []string
+ // UnavailableVolumeTypes is a list of volume type names that are not available in this zone.
+ UnavailableVolumeTypes []string
+}
+
+// VolumeType contains certain properties of a volume type.
+type VolumeType struct {
+ // Class is the class of the volume type.
+ Class string
+ // Name is the name of the volume type.
+ Name string
+ // Usable defines if the volume type can be used for shoot clusters.
+ Usable *bool
+}
+
+const (
+ // VolumeClassStandard is a constant for the standard volume class.
+ VolumeClassStandard string = "standard"
+ // VolumeClassPremium is a constant for the premium volume class.
+ VolumeClassPremium string = "premium"
+)
+
+// VersionClassification is the logical state of a version according to https://github.com/gardener/gardener/blob/master/docs/operations/versioning.md
+type VersionClassification string
+
+const (
+ // ClassificationPreview indicates that a version has recently been added and not promoted to "Supported" yet.
+ // ClassificationPreview versions will not be considered for automatic Kubernetes and Machine Image patch version updates.
+ ClassificationPreview VersionClassification = "preview"
+ // ClassificationSupported indicates that a patch version is the recommended version for a shoot.
+ // Using VersionMaintenance (see: https://github.com/gardener/gardener/blob/master/docs/operations/versioning.md) there is one supported version per maintained minor version.
+ // Supported versions are eligible for the automated Kubernetes and Machine image patch version update for shoot clusters in Gardener.
+ ClassificationSupported VersionClassification = "supported"
+ // ClassificationDeprecated indicates that a patch version should not be used anymore, should be updated to a new version
+ // and will eventually expire.
+ ClassificationDeprecated VersionClassification = "deprecated"
+)
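ExpirableVersion and VersionClassification together describe which versions a CloudProfile still offers. The sketch below shows one plausible way a consumer could filter out expired and preview versions; the function name and the exact filtering policy are assumptions for illustration, not logic taken from this patch.

package example

import (
	"time"

	gardencore "github.com/gardener/gardener/pkg/apis/core"
)

// SelectableVersions returns the versions of a KubernetesSettings block that
// have not expired and are not classified as "preview".
func SelectableVersions(settings gardencore.KubernetesSettings, now time.Time) []gardencore.ExpirableVersion {
	var out []gardencore.ExpirableVersion
	for _, v := range settings.Versions {
		if v.ExpirationDate != nil && v.ExpirationDate.Time.Before(now) {
			continue // the version has expired
		}
		if v.Classification != nil && *v.Classification == gardencore.ClassificationPreview {
			continue // preview versions are excluded from automatic updates
		}
		out = append(out, v)
	}
	return out
}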
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_common.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_common.go
new file mode 100644
index 0000000..abb184a
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_common.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ErrorCode is a string alias.
+type ErrorCode string
+
+const (
+ // ErrorInfraUnauthorized indicates that the last error occurred due to invalid infrastructure credentials.
+ ErrorInfraUnauthorized ErrorCode = "ERR_INFRA_UNAUTHORIZED"
+ // ErrorInfraInsufficientPrivileges indicates that the last error occurred due to insufficient infrastructure privileges.
+ ErrorInfraInsufficientPrivileges ErrorCode = "ERR_INFRA_INSUFFICIENT_PRIVILEGES"
+ // ErrorInfraQuotaExceeded indicates that the last error occurred due to infrastructure quota limits.
+ ErrorInfraQuotaExceeded ErrorCode = "ERR_INFRA_QUOTA_EXCEEDED"
+ // ErrorInfraDependencies indicates that the last error occurred due to dependent objects on the infrastructure level.
+ ErrorInfraDependencies ErrorCode = "ERR_INFRA_DEPENDENCIES"
+ // ErrorInfraResourcesDepleted indicates that the last error occurred due to depleted resource in the infrastructure.
+ ErrorInfraResourcesDepleted ErrorCode = "ERR_INFRA_RESOURCES_DEPLETED"
+ // ErrorCleanupClusterResources indicates that the last error occurred due to resources in the cluster that are stuck in deletion.
+ ErrorCleanupClusterResources ErrorCode = "ERR_CLEANUP_CLUSTER_RESOURCES"
+ // ErrorConfigurationProblem indicates that the last error occurred due to a configuration problem.
+ ErrorConfigurationProblem ErrorCode = "ERR_CONFIGURATION_PROBLEM"
+)
+
+// LastError indicates the last occurred error for an operation on a resource.
+type LastError struct {
+ // A human readable message indicating details about the last error.
+ Description string
+ // ID of the task which caused this last error
+ TaskID *string
+ // Well-defined error codes of the last error(s).
+ // +optional
+ Codes []ErrorCode
+ // Last time the error was reported
+ LastUpdateTime *metav1.Time
+}
+
+// LastOperationType is a string alias.
+type LastOperationType string
+
+const (
+ // LastOperationTypeReconcile indicates a 'reconcile' operation.
+ LastOperationTypeReconcile LastOperationType = "Reconcile"
+ // LastOperationTypeDelete indicates a 'delete' operation.
+ LastOperationTypeDelete LastOperationType = "Delete"
+ // LastOperationTypeRestore indicates a 'restore' operation.
+ LastOperationTypeRestore LastOperationType = "Restore"
+)
+
+// LastOperationState is a string alias.
+type LastOperationState string
+
+const (
+ // LastOperationStateProcessing indicates that an operation is ongoing.
+ LastOperationStateProcessing LastOperationState = "Processing"
+ // LastOperationStateSucceeded indicates that an operation has completed successfully.
+ LastOperationStateSucceeded LastOperationState = "Succeeded"
+ // LastOperationStateError indicates that an operation is completed with errors and will be retried.
+ LastOperationStateError LastOperationState = "Error"
+ // LastOperationStateFailed indicates that an operation is completed with errors and won't be retried.
+ LastOperationStateFailed LastOperationState = "Failed"
+ // LastOperationStatePending indicates that an operation cannot be done now, but will be tried in future.
+ LastOperationStatePending LastOperationState = "Pending"
+ // LastOperationStateAborted indicates that an operation has been aborted.
+ LastOperationStateAborted LastOperationState = "Aborted"
+)
+
+// LastOperation indicates the type and the state of the last operation, along with a description
+// message and a progress indicator.
+type LastOperation struct {
+ // A human readable message indicating details about the last operation.
+ Description string
+ // Last time the operation state transitioned from one to another.
+ LastUpdateTime metav1.Time
+ // The progress in percentage (0-100) of the last operation.
+ Progress int32
+ // Status of the last operation, one of Processing, Succeeded, Error, Failed, Pending, Aborted.
+ State LastOperationState
+ // Type of the last operation, one of Reconcile, Delete, Restore.
+ Type LastOperationType
+}
+
+// Gardener holds the information about the Gardener.
+type Gardener struct {
+ // ID is the Docker container id of the Gardener which last acted on a Shoot cluster.
+ ID string
+ // Name is the hostname (pod name) of the Gardener which last acted on a Shoot cluster.
+ Name string
+ // Version is the version of the Gardener which last acted on a Shoot cluster.
+ Version string
+}
+
+const (
+ // GardenerName is the value in a Garden resource's `.metadata.finalizers[]` array on which the Gardener will react
+ // when performing a delete request on a resource.
+ GardenerName = "gardener"
+)
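LastOperation, LastOperationState and LastError are plain data types; how callers interpret them is not shown in this file. As an illustration only (the helper names and the notion of a "terminal" state are assumptions), a consumer might use them like this:

package example

import (
	"fmt"
	"strings"

	gardencore "github.com/gardener/gardener/pkg/apis/core"
)

// OperationFinished reports whether a LastOperation reached a terminal state
// (Succeeded, Failed or Aborted, per the constants above).
func OperationFinished(op *gardencore.LastOperation) bool {
	if op == nil {
		return false
	}
	switch op.State {
	case gardencore.LastOperationStateSucceeded,
		gardencore.LastOperationStateFailed,
		gardencore.LastOperationStateAborted:
		return true
	}
	return false
}

// DescribeLastError renders a LastError into a single log line.
func DescribeLastError(err *gardencore.LastError) string {
	if err == nil {
		return "no error recorded"
	}
	codes := make([]string, 0, len(err.Codes))
	for _, c := range err.Codes {
		codes = append(codes, string(c))
	}
	return fmt.Sprintf("%s (codes: %s)", err.Description, strings.Join(codes, ", "))
}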
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerinstallation.go
new file mode 100644
index 0000000..1c9b5d9
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerinstallation.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerInstallation represents an installation request for an external controller.
+type ControllerInstallation struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // Spec contains the specification of this installation.
+ Spec ControllerInstallationSpec
+ // Status contains the status of this installation.
+ Status ControllerInstallationStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerInstallationList is a collection of ControllerInstallations.
+type ControllerInstallationList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of ControllerInstallations.
+ Items []ControllerInstallation
+}
+
+// ControllerInstallationSpec is the specification of a ControllerInstallation.
+type ControllerInstallationSpec struct {
+ // RegistrationRef is used to reference a ControllerRegistration resource.
+ RegistrationRef corev1.ObjectReference
+ // SeedRef is used to reference a Seed resource.
+ SeedRef corev1.ObjectReference
+}
+
+// ControllerInstallationStatus is the status of a ControllerInstallation.
+type ControllerInstallationStatus struct {
+ // Conditions represents the latest available observations of a ControllerInstallation's current state.
+ Conditions []Condition
+ // ProviderStatus contains type-specific status.
+ // +optional
+ ProviderStatus *runtime.RawExtension
+}
+
+const (
+ // ControllerInstallationHealthy is a condition type for indicating whether the controller is healthy.
+ ControllerInstallationHealthy ConditionType = "Healthy"
+ // ControllerInstallationInstalled is a condition type for indicating whether the controller has been installed.
+ ControllerInstallationInstalled ConditionType = "Installed"
+ // ControllerInstallationValid is a condition type for indicating whether the installation request is valid.
+ ControllerInstallationValid ConditionType = "Valid"
+ // ControllerInstallationRequired is a condition type for indicating that the respective extension controller is
+ // still required on the seed cluster as corresponding extension resources still exist.
+ ControllerInstallationRequired ConditionType = "Required"
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go
new file mode 100644
index 0000000..4a14087
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRegistration represents a registration of an external controller.
+type ControllerRegistration struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // Spec contains the specification of this registration.
+ Spec ControllerRegistrationSpec
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRegistrationList is a collection of ControllerRegistrations.
+type ControllerRegistrationList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of ControllerRegistrations.
+ Items []ControllerRegistration
+}
+
+// ControllerRegistrationSpec is the specification of a ControllerRegistration.
+type ControllerRegistrationSpec struct {
+ // Resources is a list of combinations of kinds (DNSProvider, Infrastructure, Generic, ...) and their actual types
+ // (aws-route53, gcp, auditlog, ...).
+ Resources []ControllerResource
+ // Deployment contains information for how this controller is deployed.
+ Deployment *ControllerDeployment
+}
+
+// ControllerResource is a combination of a kind (DNSProvider, Infrastructure, Generic, ...) and the actual type for this
+// kind (aws-route53, gcp, auditlog, ...).
+type ControllerResource struct {
+ // Kind is the resource kind.
+ Kind string
+ // Type is the resource type.
+ Type string
+ // GloballyEnabled determines if this resource is required by all Shoot clusters.
+ GloballyEnabled *bool
+ // ReconcileTimeout defines how long Gardener should wait for the resource reconciliation.
+ ReconcileTimeout *metav1.Duration
+ // Primary determines if the controller backed by this ControllerRegistration is responsible for the extension
+ // resource's lifecycle. This field defaults to true. There must be exactly one primary controller for this kind/type
+ // combination.
+ Primary *bool
+}
+
+// ControllerDeployment contains information for how this controller is deployed.
+type ControllerDeployment struct {
+ // Type is the deployment type.
+ Type string
+ // ProviderConfig contains type-specific configuration.
+ ProviderConfig *runtime.RawExtension
+ // Policy controls how the controller is deployed. It defaults to 'OnDemand'.
+ Policy *ControllerDeploymentPolicy
+ // SeedSelector contains an optional label selector for seeds. Only if the labels match will this controller be
+ // considered for deployment.
+ // An empty list means that all seeds are selected.
+ SeedSelector *metav1.LabelSelector
+}
+
+// ControllerDeploymentPolicy is a string alias.
+type ControllerDeploymentPolicy string
+
+const (
+ // ControllerDeploymentPolicyOnDemand specifies that the controller shall be only deployed if required by another
+ // resource. If nothing requires it then the controller shall not be deployed.
+ ControllerDeploymentPolicyOnDemand ControllerDeploymentPolicy = "OnDemand"
+ // ControllerDeploymentPolicyAlways specifies that the controller shall be deployed always, independent of whether
+ // another resource requires it or the respective seed has shoots.
+ ControllerDeploymentPolicyAlways ControllerDeploymentPolicy = "Always"
+ // ControllerDeploymentPolicyAlwaysExceptNoShoots specifies that the controller shall be deployed always, independent of
+ // whether another resource requires it, but only when the respective seed has at least one shoot.
+ ControllerDeploymentPolicyAlwaysExceptNoShoots ControllerDeploymentPolicy = "AlwaysExceptNoShoots"
+)
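The three ControllerDeploymentPolicy constants describe when a registered controller should be deployed to a seed. The sketch below simply restates those rules in code; the function name and the `required`/`shootsOnSeed` inputs are illustrative assumptions, not the actual gardener-controller-manager logic.

package example

import (
	gardencore "github.com/gardener/gardener/pkg/apis/core"
)

// ShouldDeploy sketches the policy semantics: OnDemand deploys only when
// something requires the controller, Always deploys unconditionally, and
// AlwaysExceptNoShoots additionally requires the seed to host shoots.
func ShouldDeploy(policy gardencore.ControllerDeploymentPolicy, required bool, shootsOnSeed int) bool {
	switch policy {
	case gardencore.ControllerDeploymentPolicyAlways:
		return true
	case gardencore.ControllerDeploymentPolicyAlwaysExceptNoShoots:
		return shootsOnSeed > 0
	case gardencore.ControllerDeploymentPolicyOnDemand:
		return required
	default:
		return required // unknown policies fall back to on-demand behaviour
	}
}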
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_plant.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_plant.go
new file mode 100644
index 0000000..9919bb7
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_plant.go
@@ -0,0 +1,104 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Plant represents an external kubernetes cluster.
+type Plant struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // Spec contains the specification of this Plant.
+ Spec PlantSpec
+ // Status contains the status of this Plant.
+ Status PlantStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PlantList is a collection of Plants.
+type PlantList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of Plants.
+ Items []Plant
+}
+
+const (
+ // PlantEveryNodeReady is a constant for a condition type indicating the node health.
+ PlantEveryNodeReady ConditionType = "EveryNodeReady"
+ // PlantAPIServerAvailable is a constant for a condition type indicating that the Plant cluster API server is available.
+ PlantAPIServerAvailable ConditionType = "APIServerAvailable"
+)
+
+// PlantSpec is the specification of a Plant.
+type PlantSpec struct {
+ // SecretRef is a reference to a Secret object containing the Kubeconfig of the external kubernetes
+ // clusters to be added to Gardener.
+ SecretRef corev1.LocalObjectReference
+ // Endpoints is the configuration of the plant endpoints
+ Endpoints []Endpoint
+}
+
+// Endpoint is an endpoint for monitoring, logging and other services around the plant.
+type Endpoint struct {
+ // Name is the name of the endpoint
+ Name string
+ // URL is the url of the endpoint
+ URL string
+ // Purpose is the purpose of the endpoint
+ Purpose string
+}
+
+// PlantStatus is the status of a Plant.
+type PlantStatus struct {
+ // Conditions represents the latest available observations of a Plant's current state.
+ Conditions []Condition
+ // ObservedGeneration is the most recent generation observed for this Plant. It corresponds to the
+ // Plant's generation, which is updated on mutation by the API Server.
+ ObservedGeneration *int64
+ // ClusterInfo is additional computed information about the newly added cluster (Plant)
+ ClusterInfo *ClusterInfo
+}
+
+// ClusterInfo contains information about the Plant cluster
+type ClusterInfo struct {
+ // Cloud describes the cloud information
+ Cloud CloudInfo
+ // Kubernetes describes kubernetes meta information (e.g., version)
+ Kubernetes KubernetesInfo
+}
+
+// CloudInfo contains information about the cloud
+type CloudInfo struct {
+ // Type is the cloud type
+ Type string
+ // Region is the cloud region
+ Region string
+}
+
+// KubernetesInfo contains the version and configuration variables for the Plant cluster.
+type KubernetesInfo struct {
+ // Version is the semantic Kubernetes version to use for the Plant cluster.
+ Version string
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go
new file mode 100644
index 0000000..8806303
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go
@@ -0,0 +1,143 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Project holds certain properties about a Gardener project.
+type Project struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // Spec defines the project properties.
+ Spec ProjectSpec
+ // Most recently observed status of the Project.
+ Status ProjectStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ProjectList is a collection of Projects.
+type ProjectList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of Projects.
+ Items []Project
+}
+
+// ProjectSpec is the specification of a Project.
+type ProjectSpec struct {
+ // CreatedBy is a subject representing a user name, an email address, or any other identifier of a user
+ // who created the project.
+ CreatedBy *rbacv1.Subject
+ // Description is a human-readable description of what the project is used for.
+ Description *string
+ // Owner is a subject representing a user name, an email address, or any other identifier of a user owning
+ // the project.
+ Owner *rbacv1.Subject
+ // Purpose is a human-readable explanation of the project's purpose.
+ Purpose *string
+ // Members is a list of subjects representing a user name, an email address, or any other identifier of a user,
+ // group, or service account that has a certain role.
+ Members []ProjectMember
+ // Namespace is the name of the namespace that has been created for the Project object.
+ // A nil value means that Gardener will determine the name of the namespace.
+ Namespace *string
+ // Tolerations contains the default tolerations and a list for allowed taints on seed clusters.
+ Tolerations *ProjectTolerations
+}
+
+// ProjectStatus holds the most recently observed status of the project.
+type ProjectStatus struct {
+ // ObservedGeneration is the most recent generation observed for this project.
+ ObservedGeneration int64
+ // Phase is the current phase of the project.
+ Phase ProjectPhase
+ // StaleSinceTimestamp contains the timestamp when the project was first discovered to be stale/unused.
+ StaleSinceTimestamp *metav1.Time
+ // StaleAutoDeleteTimestamp contains the timestamp when the project will be garbage-collected/automatically deleted
+ // because it's stale/unused.
+ StaleAutoDeleteTimestamp *metav1.Time
+}
+
+// ProjectMember is a member of a project.
+type ProjectMember struct {
+ // Subject represents a user name, an email address, or any other identifier of a user, group, or service
+ // account that has a certain role.
+ rbacv1.Subject
+ // Roles is a list of roles of this member.
+ Roles []string
+}
+
+// ProjectTolerations contains the tolerations for taints on seed clusters.
+type ProjectTolerations struct {
+ // Defaults contains a list of tolerations that are added to the shoots in this project by default.
+ Defaults []Toleration
+ // Whitelist contains a list of tolerations that are allowed to be added to the shoots in this project. Please note
+ // that this list may only be added by users having the `spec-tolerations-whitelist` verb for project resources.
+ Whitelist []Toleration
+}
+
+// Toleration is a toleration for a seed taint.
+type Toleration struct {
+ // Key is the toleration key to be applied to a project or shoot.
+ Key string
+ // Value is the toleration value corresponding to the toleration key.
+ Value *string
+}
+
+const (
+ // ProjectMemberAdmin is a const for a role that provides full admin access.
+ ProjectMemberAdmin = "admin"
+ // ProjectMemberOwner is a const for a role that provides full owner access.
+ ProjectMemberOwner = "owner"
+ // ProjectMemberViewer is a const for a role that provides limited permissions to only view some resources.
+ ProjectMemberViewer = "viewer"
+ // ProjectMemberUserAccessManager is a const for a role that provides permissions to manage human users and groups.
+ ProjectMemberUserAccessManager = "uam"
+ // ProjectMemberExtensionPrefix is a prefix for custom roles that are not known by Gardener.
+ ProjectMemberExtensionPrefix = "extension:"
+)
+
+// ProjectPhase is a label for the condition of a project at the current time.
+type ProjectPhase string
+
+const (
+ // ProjectPending indicates that the project reconciliation is pending.
+ ProjectPending ProjectPhase = "Pending"
+ // ProjectReady indicates that the project reconciliation was successful.
+ ProjectReady ProjectPhase = "Ready"
+ // ProjectFailed indicates that the project reconciliation failed.
+ ProjectFailed ProjectPhase = "Failed"
+ // ProjectTerminating indicates that the project is in termination process.
+ ProjectTerminating ProjectPhase = "Terminating"
+
+ // ProjectEventNamespaceReconcileFailed indicates that the namespace reconciliation has failed.
+ ProjectEventNamespaceReconcileFailed = "NamespaceReconcileFailed"
+ // ProjectEventNamespaceReconcileSuccessful indicates that the namespace reconciliation has succeeded.
+ ProjectEventNamespaceReconcileSuccessful = "NamespaceReconcileSuccessful"
+ // ProjectEventNamespaceDeletionFailed indicates that the namespace deletion failed.
+ ProjectEventNamespaceDeletionFailed = "NamespaceDeletionFailed"
+ // ProjectEventNamespaceMarkedForDeletion indicates that the namespace has been successfully marked for deletion.
+ ProjectEventNamespaceMarkedForDeletion = "NamespaceMarkedForDeletion"
+)
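Project member roles are plain strings, with the `extension:` prefix marking custom roles that Gardener itself does not know. A small illustrative sketch follows (the helper names are made up for this example and are not part of the package):

package example

import (
	"strings"

	gardencore "github.com/gardener/gardener/pkg/apis/core"
)

// HasRole reports whether a project member carries the given role.
func HasRole(member gardencore.ProjectMember, role string) bool {
	for _, r := range member.Roles {
		if r == role {
			return true
		}
	}
	return false
}

// IsExtensionRole reports whether a role is a custom role, identified by the
// "extension:" prefix defined above.
func IsExtensionRole(role string) bool {
	return strings.HasPrefix(role, gardencore.ProjectMemberExtensionPrefix)
}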
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_quota.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_quota.go
new file mode 100644
index 0000000..bf9e0a9
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_quota.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Quota holds constraints for resources consumed by Shoot clusters, scoped either to a project or to a provider secret.
+type Quota struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // Spec defines the Quota constraints.
+ Spec QuotaSpec
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// QuotaList is a collection of Quotas.
+type QuotaList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of Quotas.
+ Items []Quota
+}
+
+// QuotaSpec is the specification of a Quota.
+type QuotaSpec struct {
+ // ClusterLifetimeDays is the lifetime of a Shoot cluster in days before it will be terminated automatically.
+ ClusterLifetimeDays *int32
+ // Metrics is a list of resources which will be put under constraints.
+ Metrics corev1.ResourceList
+ // Scope is the scope of the Quota object, either 'project' or 'secret'.
+ Scope corev1.ObjectReference
+}
+
+const (
+ // QuotaMetricCPU is the constraint for the amount of CPUs
+ QuotaMetricCPU corev1.ResourceName = corev1.ResourceCPU
+ // QuotaMetricGPU is the constraint for the amount of GPUs (e.g. from Nvidia)
+ QuotaMetricGPU corev1.ResourceName = "gpu"
+ // QuotaMetricMemory is the constraint for the amount of memory
+ QuotaMetricMemory corev1.ResourceName = corev1.ResourceMemory
+ // QuotaMetricStorageStandard is the constraint for the size of a standard disk
+ QuotaMetricStorageStandard corev1.ResourceName = corev1.ResourceStorage + ".standard"
+ // QuotaMetricStoragePremium is the constraint for the size of a premium disk (e.g. SSD)
+ QuotaMetricStoragePremium corev1.ResourceName = corev1.ResourceStorage + ".premium"
+ // QuotaMetricLoadbalancer is the constraint for the amount of loadbalancers
+ QuotaMetricLoadbalancer corev1.ResourceName = "loadbalancer"
+)
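QuotaSpec combines an optional cluster lifetime, a metrics list and a scope reference. A minimal construction sketch follows; the object name, the metric values and the Project scope reference are assumptions for illustration only.

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	gardencore "github.com/gardener/gardener/pkg/apis/core"
)

// ExampleQuota builds an illustrative Quota that limits CPU, memory, standard
// storage and load balancers, and terminates clusters after 14 days.
func ExampleQuota() gardencore.Quota {
	lifetime := int32(14)
	return gardencore.Quota{
		ObjectMeta: metav1.ObjectMeta{Name: "trial-quota", Namespace: "garden-trial"},
		Spec: gardencore.QuotaSpec{
			ClusterLifetimeDays: &lifetime,
			Metrics: corev1.ResourceList{
				gardencore.QuotaMetricCPU:             resource.MustParse("100"),
				gardencore.QuotaMetricMemory:          resource.MustParse("400Gi"),
				gardencore.QuotaMetricStorageStandard: resource.MustParse("1Ti"),
				gardencore.QuotaMetricLoadbalancer:    resource.MustParse("10"),
			},
			// Scope set to a Project reference (assumed convention; a Secret reference is the other option).
			Scope: corev1.ObjectReference{APIVersion: "core.gardener.cloud/v1beta1", Kind: "Project"},
		},
	}
}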
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_secretbinding.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_secretbinding.go
new file mode 100644
index 0000000..9732d38
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_secretbinding.go
@@ -0,0 +1,44 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SecretBinding represents a binding to a secret in the same or another namespace.
+type SecretBinding struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // SecretRef is a reference to a secret object in the same or another namespace.
+ SecretRef corev1.SecretReference
+ // Quotas is a list of references to Quota objects in the same or another namespace.
+ Quotas []corev1.ObjectReference
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SecretBindingList is a collection of SecretBindings.
+type SecretBindingList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of SecretBindings.
+ Items []SecretBinding
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go
new file mode 100644
index 0000000..6619ff8
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go
@@ -0,0 +1,284 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Seed represents a seed cluster that is registered with Gardener and can host the control planes of Shoot clusters.
+type Seed struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // Spec contains the specification of this seed.
+ Spec SeedSpec
+ // Status contains the status of this seed.
+ Status SeedStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SeedList is a collection of Seeds.
+type SeedList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of Seeds.
+ Items []Seed
+}
+
+// SeedSpec is the specification of a Seed.
+type SeedSpec struct {
+ // Backup holds the object store configuration for the backups of shoot (currently only etcd).
+ // If it is not specified, then there won't be any backups taken for shoots associated with this seed.
+ // If backup field is present in seed, then backups of the etcd from shoot control plane will be stored
+ // under the configured object store.
+ Backup *SeedBackup
+ // DNS contains DNS-relevant information about this seed cluster.
+ DNS SeedDNS
+ // Networks defines the pod, service and worker network of the Seed cluster.
+ Networks SeedNetworks
+ // Provider defines the provider type and region for this Seed cluster.
+ Provider SeedProvider
+ // SecretRef is a reference to a Secret object containing the Kubeconfig and the cloud provider credentials for
+ // the account the Seed cluster has been deployed to.
+ SecretRef *corev1.SecretReference
+ // Settings contains certain settings for this seed cluster.
+ Settings *SeedSettings
+ // Taints describes taints on the seed.
+ Taints []SeedTaint
+ // Volume contains settings for persistentvolumes created in the seed cluster.
+ Volume *SeedVolume
+ // Ingress configures Ingress specific settings of the Seed cluster.
+ Ingress *Ingress
+}
+
+// GetProviderType gets the type of the provider.
+func (s *Seed) GetProviderType() string {
+ return s.Spec.Provider.Type
+}
+
+// SeedStatus is the status of a Seed.
+type SeedStatus struct {
+ // Gardener holds information about the Gardener which last acted on the Shoot.
+ Gardener *Gardener
+ // KubernetesVersion is the Kubernetes version of the seed cluster.
+ KubernetesVersion *string
+ // Conditions represents the latest available observations of a Seed's current state.
+ Conditions []Condition
+ // ObservedGeneration is the most recent generation observed for this Seed. It corresponds to the
+ // Seed's generation, which is updated on mutation by the API Server.
+ ObservedGeneration int64
+ // ClusterIdentity is the identity of the Seed cluster
+ ClusterIdentity *string
+ // Capacity represents the total resources of a seed.
+ Capacity corev1.ResourceList
+ // Allocatable represents the resources of a seed that are available for scheduling.
+ // Defaults to Capacity.
+ Allocatable corev1.ResourceList
+}
+
+// SeedBackup contains the object store configuration for backups for shoot (currently only etcd).
+type SeedBackup struct {
+ // Provider is a provider name.
+ Provider string
+ // ProviderConfig is the configuration passed to BackupBucket resource.
+ ProviderConfig *runtime.RawExtension
+ // Region is a region name.
+ Region *string
+ // SecretRef is a reference to a Secret object containing the cloud provider credentials for
+ // the object store where backups should be stored. It should have enough privileges to manipulate
+ // the objects as well as buckets.
+ SecretRef corev1.SecretReference
+}
+
+// SeedDNS contains the external domain and configuration for the DNS provider
+type SeedDNS struct {
+ // IngressDomain is the domain of the Seed cluster pointing to the ingress controller endpoint. It will be used
+ // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable.
+ // This will be removed in the next API version and replaced by spec.ingress.domain.
+ IngressDomain *string
+ // Provider configures a DNSProvider
+ Provider *SeedDNSProvider
+}
+
+// SeedDNSProvider configures a DNS provider
+type SeedDNSProvider struct {
+ // Type describes the type of the dns-provider, for example `aws-route53`
+ Type string
+ // SecretRef is a reference to a Secret object containing cloud provider credentials used for registering external domains.
+ SecretRef corev1.SecretReference
+ // Domains contains information about which domains shall be included/excluded for this provider.
+ Domains *DNSIncludeExclude
+ // Zones contains information about which hosted zones shall be included/excluded for this provider.
+ Zones *DNSIncludeExclude
+}
+
+// Ingress configures the Ingress specific settings of the Seed cluster
+type Ingress struct {
+ // Domain specifies the ingress domain of the Seed cluster pointing to the ingress controller endpoint. It will be used
+ // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable.
+ Domain string
+ // Controller configures a Gardener managed Ingress Controller listening on the ingressDomain
+ Controller IngressController
+}
+
+// IngressController enables a Gardener managed Ingress Controller listening on the ingressDomain
+type IngressController struct {
+ // Kind defines which kind of IngressController to use, for example `nginx`
+ Kind string
+ // ProviderConfig specifies infrastructure specific configuration for the ingressController
+ ProviderConfig *runtime.RawExtension
+}
+
+// SeedNetworks contains CIDRs for the pod, service and node networks of a Kubernetes cluster.
+type SeedNetworks struct {
+ // Nodes is the CIDR of the node network.
+ Nodes *string
+ // Pods is the CIDR of the pod network.
+ Pods string
+ // Services is the CIDR of the service network.
+ Services string
+ // ShootDefaults contains the default networks CIDRs for shoots.
+ ShootDefaults *ShootNetworks
+ // BlockCIDRs is a list of network addresses that should be blocked for shoot control plane components running
+ // in the seed cluster.
+ BlockCIDRs []string
+}
+
+// ShootNetworks contains the default networks CIDRs for shoots.
+type ShootNetworks struct {
+ // Pods is the CIDR of the pod network.
+ Pods *string
+ // Services is the CIDR of the service network.
+ Services *string
+}
+
+// SeedProvider defines the provider type and region for this Seed cluster.
+type SeedProvider struct {
+ // Type is the name of the provider.
+ Type string
+ // ProviderConfig is the configuration passed to Seed resource.
+ ProviderConfig *runtime.RawExtension
+ // Region is a name of a region.
+ Region string
+}
+
+// SeedSettings contains certain settings for this seed cluster.
+type SeedSettings struct {
+ // ExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the seed.
+ ExcessCapacityReservation *SeedSettingExcessCapacityReservation
+ // Scheduling controls settings for scheduling decisions for the seed.
+ Scheduling *SeedSettingScheduling
+ // ShootDNS controls the shoot DNS settings for the seed.
+ ShootDNS *SeedSettingShootDNS
+ // LoadBalancerServices controls certain settings for services of type load balancer that are created in the
+ // seed.
+ LoadBalancerServices *SeedSettingLoadBalancerServices
+ // VerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the seed.
+ VerticalPodAutoscaler *SeedSettingVerticalPodAutoscaler
+}
+
+// SeedSettingExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the
+// seed. When enabled then this is done via PodPriority and requires the Seed cluster to have Kubernetes version 1.11
+// or the PodPriority feature gate as well as the scheduling.k8s.io/v1alpha1 API group enabled.
+type SeedSettingExcessCapacityReservation struct {
+ // Enabled controls whether the excess capacity reservation should be enabled.
+ Enabled bool
+}
+
+// SeedSettingShootDNS controls the shoot DNS settings for the seed.
+type SeedSettingShootDNS struct {
+ // Enabled controls whether the DNS for shoot clusters should be enabled. When disabled then all shoots using the
+ // seed won't get any DNS providers, DNS records, and no DNS extension controller is required to be installed here.
+ // This is useful for environments where DNS is not required.
+ Enabled bool
+}
+
+// SeedSettingScheduling controls settings for scheduling decisions for the seed.
+type SeedSettingScheduling struct {
+ // Visible controls whether the gardener-scheduler shall consider this seed when scheduling shoots. Invisible seeds
+ // are not considered by the scheduler.
+ Visible bool
+}
+
+// SeedSettingLoadBalancerServices controls certain settings for services of type load balancer that are created in the
+// seed.
+type SeedSettingLoadBalancerServices struct {
+ // Annotations is a map of annotations that will be injected/merged into every load balancer service object.
+ Annotations map[string]string
+}
+
+// SeedSettingVerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the
+// seed.
+type SeedSettingVerticalPodAutoscaler struct {
+ // Enabled controls whether the VPA components shall be deployed into the garden namespace in the seed cluster. It
+ // is enabled by default because Gardener heavily relies on a VPA being deployed. You should only disable this if
+ // your seed cluster already has another, manually/custom managed VPA deployment.
+ Enabled bool
+}
+
+// SeedTaint describes a taint on a seed.
+type SeedTaint struct {
+ // Key is the taint key to be applied to a seed.
+ Key string
+ // Value is the taint value corresponding to the taint key.
+ Value *string
+}
+
+const (
+ // SeedTaintProtected is a constant for a taint key on a seed that marks it as protected. Protected seeds
+ // may only be used by shoots in the `garden` namespace.
+ SeedTaintProtected = "seed.gardener.cloud/protected"
+)
+
+// SeedVolume contains settings for persistentvolumes created in the seed cluster.
+type SeedVolume struct {
+ // MinimumSize defines the minimum size that should be used for PVCs in the seed.
+ MinimumSize *resource.Quantity
+ // Providers is a list of storage class provisioner types for the seed.
+ Providers []SeedVolumeProvider
+}
+
+// SeedVolumeProvider is a storage class provisioner type.
+type SeedVolumeProvider struct {
+ // Purpose is the purpose of this provider.
+ Purpose string
+ // Name is the name of the storage class provisioner type.
+ Name string
+}
+
+const (
+ // SeedBootstrapped is a constant for a condition type indicating that the seed cluster has been
+ // bootstrapped.
+ SeedBootstrapped ConditionType = "Bootstrapped"
+ // SeedExtensionsReady is a constant for a condition type indicating that the extensions are ready.
+ SeedExtensionsReady ConditionType = "ExtensionsReady"
+ // SeedGardenletReady is a constant for a condition type indicating that the Gardenlet is ready.
+ SeedGardenletReady ConditionType = "GardenletReady"
+)
+
+// Resource constants for Gardener object types
+const (
+ // ResourceShoots is a resource constant for the number of shoots.
+ ResourceShoots corev1.ResourceName = "shoots"
+)
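SeedTaint (above) mirrors the Toleration type from types_project.go: a shoot must tolerate a seed's taints before it may be scheduled onto that seed. The sketch below shows one plausible matching rule (key equality, plus value equality when the taint carries a value); the actual scheduler logic lives elsewhere in Gardener and may differ.

package example

import (
	gardencore "github.com/gardener/gardener/pkg/apis/core"
)

// ToleratesSeedTaints reports whether every taint on the seed is matched by a
// toleration with the same key and, where the taint carries a value, the same value.
func ToleratesSeedTaints(tolerations []gardencore.Toleration, taints []gardencore.SeedTaint) bool {
	for _, taint := range taints {
		if !toleratesTaint(tolerations, taint) {
			return false
		}
	}
	return true
}

func toleratesTaint(tolerations []gardencore.Toleration, taint gardencore.SeedTaint) bool {
	for _, t := range tolerations {
		if t.Key != taint.Key {
			continue
		}
		if taint.Value == nil || (t.Value != nil && *t.Value == *taint.Value) {
			return true
		}
	}
	return false
}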
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go
new file mode 100644
index 0000000..b3f2b6d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go
@@ -0,0 +1,967 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ "time"
+
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Shoot represents a Shoot cluster created and managed by Gardener.
+type Shoot struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // Specification of the Shoot cluster.
+ Spec ShootSpec
+ // Most recently observed status of the Shoot cluster.
+ Status ShootStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ShootList is a list of Shoot objects.
+type ShootList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of Shoots.
+ Items []Shoot
+}
+
+// ShootSpec is the specification of a Shoot.
+type ShootSpec struct {
+ // Addons contains information about enabled/disabled addons and their configuration.
+ Addons *Addons
+ // CloudProfileName is a name of a CloudProfile object.
+ CloudProfileName string
+ // DNS contains information about the DNS settings of the Shoot.
+ DNS *DNS
+ // Extensions contain type and provider information for Shoot extensions.
+ Extensions []Extension
+ // Hibernation contains information whether the Shoot is suspended or not.
+ Hibernation *Hibernation
+ // Kubernetes contains the version and configuration settings of the control plane components.
+ Kubernetes Kubernetes
+ // Networking contains information about cluster networking such as CNI plugin type, CIDRs, etc.
+ Networking Networking
+ // Maintenance contains information about the time window for maintenance operations and which
+ // operations should be performed.
+ Maintenance *Maintenance
+ // Monitoring contains information about custom monitoring configurations for the shoot.
+ Monitoring *Monitoring
+ // Provider contains all provider-specific and provider-relevant information.
+ Provider Provider
+ // Purpose is the purpose class for this cluster.
+ Purpose *ShootPurpose
+ // Region is a name of a region.
+ Region string
+ // SecretBindingName is the name of a SecretBinding that has a reference to the provider secret.
+ // The credentials inside the provider secret will be used to create the shoot in the respective account.
+ SecretBindingName string
+ // SeedName is the name of the seed cluster that runs the control plane of the Shoot.
+ SeedName *string
+ // SeedSelector is an optional selector which must match a seed's labels for the shoot to be scheduled on that seed.
+ SeedSelector *SeedSelector
+ // Resources holds a list of named resource references that can be referred to in extension configs by their names.
+ Resources []NamedResourceReference
+ // Tolerations contains the tolerations for taints on seed clusters.
+ Tolerations []Toleration
+}
+
+// GetProviderType gets the type of the provider.
+func (s *Shoot) GetProviderType() string {
+ return s.Spec.Provider.Type
+}
+
+// ShootStatus holds the most recently observed status of the Shoot cluster.
+type ShootStatus struct {
+ // Conditions represents the latest available observations of a Shoot's current state.
+ Conditions []Condition
+ // Constraints represents conditions of a Shoot's current state that constrain some operations on it.
+ Constraints []Condition
+ // Gardener holds information about the Gardener which last acted on the Shoot.
+ Gardener Gardener
+ // IsHibernated indicates whether the Shoot is currently hibernated.
+ IsHibernated bool
+ // LastOperation holds information about the last operation on the Shoot.
+ LastOperation *LastOperation
+ // LastErrors holds information about the last occurred error(s) during an operation.
+ LastErrors []LastError
+ // ObservedGeneration is the most recent generation observed for this Shoot. It corresponds to the
+ // Shoot's generation, which is updated on mutation by the API Server.
+ ObservedGeneration int64
+ // RetryCycleStartTime is the start time of the last retry cycle (used to determine how often an operation
+ // must be retried until we give up).
+ RetryCycleStartTime *metav1.Time
+ // SeedName is the name of the seed cluster that runs the control plane of the Shoot. This value is only written
+ // after a successful create/reconcile operation. It will be used when control planes are moved between Seeds.
+ SeedName *string
+ // TechnicalID is the name that is used for creating the Seed namespace, the infrastructure resources, and
+ // basically everything that is related to this particular Shoot.
+ TechnicalID string
+ // UID is a unique identifier for the Shoot cluster to avoid portability between Kubernetes clusters.
+ // It is used to compute unique hashes.
+ UID types.UID
+ // ClusterIdentity is the identity of the Shoot cluster
+ ClusterIdentity *string
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Addons relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Addons is a collection of configuration for specific addons which are managed by the Gardener.
+type Addons struct {
+ // KubernetesDashboard holds configuration settings for the kubernetes dashboard addon.
+ KubernetesDashboard *KubernetesDashboard
+ // NginxIngress holds configuration settings for the nginx-ingress addon.
+ NginxIngress *NginxIngress
+}
+
+// Addon allows enabling or disabling a specific addon and is used to derive from.
+type Addon struct {
+ // Enabled indicates whether the addon is enabled or not.
+ Enabled bool
+}
+
+// KubernetesDashboard describes configuration values for the kubernetes-dashboard addon.
+type KubernetesDashboard struct {
+ Addon
+ // AuthenticationMode defines the authentication mode for the kubernetes-dashboard.
+ AuthenticationMode *string
+}
+
+const (
+ // KubernetesDashboardAuthModeBasic uses basic authentication mode for auth.
+ KubernetesDashboardAuthModeBasic = "basic"
+ // KubernetesDashboardAuthModeToken uses token-based mode for auth.
+ KubernetesDashboardAuthModeToken = "token"
+)
+
+// NginxIngress describes configuration values for the nginx-ingress addon.
+type NginxIngress struct {
+ Addon
+ // LoadBalancerSourceRanges is a list of allowed IP sources for NginxIngress.
+ LoadBalancerSourceRanges []string
+ // Config contains custom configuration for the nginx-ingress-controller.
+ // See https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#configuration-options
+ Config map[string]string
+ // ExternalTrafficPolicy controls the `.spec.externalTrafficPolicy` value of the load balancer `Service`
+ // exposing the nginx-ingress. Defaults to `Cluster`.
+ ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType
+}
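+
+// The following is an illustrative, non-authoritative sketch of how the nginx-ingress addon
+// could be enabled; the source range and the configmap key/value below are assumptions, not
+// values taken from this repository:
+//
+//    policy := corev1.ServiceExternalTrafficPolicyTypeLocal
+//    addons := Addons{
+//        NginxIngress: &NginxIngress{
+//            Addon:                    Addon{Enabled: true},
+//            LoadBalancerSourceRanges: []string{"10.0.0.0/8"},
+//            Config:                   map[string]string{"proxy-body-size": "10m"},
+//            ExternalTrafficPolicy:    &policy,
+//        },
+//    }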
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// DNS relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// DNS holds information about the provider, the hosted zone id and the domain.
+type DNS struct {
+ // Domain is the externally available domain of the Shoot cluster. This domain will be written into the
+ // kubeconfig that is handed out to end-users. Once set it is immutable.
+ Domain *string
+ // Providers is a list of DNS providers that shall be enabled for this shoot cluster. Only relevant if
+ // a default domain is not used.
+ Providers []DNSProvider
+}
+
+// DNSProvider contains information about a DNS provider.
+type DNSProvider struct {
+ // Domains contains information about which domains shall be included/excluded for this provider.
+ Domains *DNSIncludeExclude
+ // Primary indicates that this DNSProvider is used for shoot related domains.
+ Primary *bool
+ // SecretName is a name of a secret containing credentials for the stated domain and the
+ // provider. When not specified, the Gardener will use the cloud provider credentials referenced
+ // by the Shoot and try to find respective credentials there. Specifying this field may override
+ // this behavior, i.e. forcing the Gardener to only look into the given secret.
+ SecretName *string
+ // Type is the DNS provider type for the Shoot. Only relevant if the default domain is not used for
+ // this shoot.
+ Type *string
+ // Zones contains information about which hosted zones shall be included/excluded for this provider.
+ Zones *DNSIncludeExclude
+}
+
+// DNSIncludeExclude contains information about which domains/zones shall be included or excluded.
+type DNSIncludeExclude struct {
+ // Include is a list of resources that shall be included.
+ Include []string
+ // Exclude is a list of resources that shall be excluded.
+ Exclude []string
+}
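+
+// Illustrative, non-authoritative sketch of a DNS section with a single primary provider; the
+// domain, provider type, and secret name below are assumptions:
+//
+//    domain := "my-shoot.example.com"
+//    providerType := "aws-route53"
+//    secretName := "my-dns-credentials"
+//    primary := true
+//    dns := DNS{
+//        Domain: &domain,
+//        Providers: []DNSProvider{{
+//            Type:       &providerType,
+//            SecretName: &secretName,
+//            Primary:    &primary,
+//            Domains:    &DNSIncludeExclude{Include: []string{"my-shoot.example.com"}},
+//        }},
+//    }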
+
+// DefaultDomain is the default value in the Shoot's '.spec.dns.domain' when '.spec.dns.provider' is 'unmanaged'
+const DefaultDomain = "cluster.local"
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Extension relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Extension contains type and provider information for Shoot extensions.
+type Extension struct {
+ // Type is the type of the extension resource.
+ Type string
+ // ProviderConfig is the configuration passed to extension resource.
+ ProviderConfig *runtime.RawExtension
+ // Disabled allows to disable extensions that were marked as 'globally enabled' by Gardener administrators.
+ Disabled *bool
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// NamedResourceReference relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// NamedResourceReference is a named reference to a resource.
+type NamedResourceReference struct {
+ // Name of the resource reference.
+ Name string
+ // ResourceRef is a reference to a resource.
+ ResourceRef autoscalingv1.CrossVersionObjectReference
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Hibernation relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Hibernation contains information whether the Shoot is suspended or not.
+type Hibernation struct {
+ // Enabled specifies whether the Shoot needs to be hibernated or not. If it is true, the Shoot's desired state is to be hibernated.
+ // If it is false or nil, the Shoot's desired state is to be awakened.
+ Enabled *bool
+ // Schedules determine the hibernation schedules.
+ Schedules []HibernationSchedule
+}
+
+// HibernationSchedule determines the hibernation schedule of a Shoot.
+// A Shoot will be regularly hibernated at each start time and will be woken up at each end time.
+// Start or End can be omitted, though at least one of them has to be specified.
+type HibernationSchedule struct {
+ // Start is a Cron spec at which time a Shoot will be hibernated.
+ Start *string
+ // End is a Cron spec at which time a Shoot will be woken up.
+ End *string
+ // Location is the time location in which both start and end shall be evaluated.
+ Location *string
+}
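+
+// Illustrative, non-authoritative sketch of a hibernation schedule that hibernates the Shoot on
+// weekday evenings and wakes it up on weekday mornings; the cron specs and location are assumptions:
+//
+//    start := "00 18 * * 1,2,3,4,5"
+//    end := "00 08 * * 1,2,3,4,5"
+//    location := "Europe/Berlin"
+//    hibernation := Hibernation{
+//        Schedules: []HibernationSchedule{{Start: &start, End: &end, Location: &location}},
+//    }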
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Kubernetes relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Kubernetes contains the version and configuration variables for the Shoot control plane.
+type Kubernetes struct {
+ // AllowPrivilegedContainers indicates whether privileged containers are allowed in the Shoot (default: true).
+ AllowPrivilegedContainers *bool
+ // ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler.
+ ClusterAutoscaler *ClusterAutoscaler
+ // KubeAPIServer contains configuration settings for the kube-apiserver.
+ KubeAPIServer *KubeAPIServerConfig
+ // KubeControllerManager contains configuration settings for the kube-controller-manager.
+ KubeControllerManager *KubeControllerManagerConfig
+ // KubeScheduler contains configuration settings for the kube-scheduler.
+ KubeScheduler *KubeSchedulerConfig
+ // KubeProxy contains configuration settings for the kube-proxy.
+ KubeProxy *KubeProxyConfig
+ // Kubelet contains configuration settings for the kubelet.
+ Kubelet *KubeletConfig
+ // Version is the semantic Kubernetes version to use for the Shoot cluster.
+ Version string
+ // VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler.
+ VerticalPodAutoscaler *VerticalPodAutoscaler
+}
+
+// ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler.
+type ClusterAutoscaler struct {
+ // ScaleDownDelayAfterAdd defines how long after scale up that scale down evaluation resumes (default: 1 hour).
+ ScaleDownDelayAfterAdd *metav1.Duration
+ // ScaleDownDelayAfterDelete defines how long after node deletion scale down evaluation resumes (defaults to ScanInterval).
+ ScaleDownDelayAfterDelete *metav1.Duration
+ // ScaleDownDelayAfterFailure defines how long after a scale down failure scale down evaluation resumes (default: 3 mins).
+ ScaleDownDelayAfterFailure *metav1.Duration
+ // ScaleDownUnneededTime defines how long a node should be unneeded before it is eligible for scale down (default: 30 mins).
+ ScaleDownUnneededTime *metav1.Duration
+ // ScaleDownUtilizationThreshold defines the utilization threshold (in %) below which a node is removed.
+ ScaleDownUtilizationThreshold *float64
+ // ScanInterval defines how often the cluster is reevaluated for scale up or down (default: 10 secs).
+ ScanInterval *metav1.Duration
+}
+
+// VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler.
+type VerticalPodAutoscaler struct {
+ // Enabled specifies whether the Kubernetes VPA shall be enabled for the shoot cluster.
+ Enabled bool
+ // EvictAfterOOMThreshold defines the threshold that will lead to pod eviction in case it OOMed in less than the given
+ // threshold since its start and if it has only one container (default: 10m0s).
+ EvictAfterOOMThreshold *metav1.Duration
+ // EvictionRateBurst defines the burst of pods that can be evicted (default: 1)
+ EvictionRateBurst *int32
+ // EvictionRateLimit defines the number of pods that can be evicted per second. A rate limit set to 0 or -1 will
+ // disable the rate limiter (default: -1).
+ EvictionRateLimit *float64
+ // EvictionTolerance defines the fraction of replica count that can be evicted for update in case more than one
+ // pod can be evicted (default: 0.5).
+ EvictionTolerance *float64
+ // RecommendationMarginFraction is the fraction of usage added as the safety margin to the recommended request
+ // (default: 0.15).
+ RecommendationMarginFraction *float64
+ // UpdaterInterval is the interval at which the updater runs (default: 1m0s).
+ UpdaterInterval *metav1.Duration
+ // RecommenderInterval is the interval at which metrics are fetched (default: 1m0s).
+ RecommenderInterval *metav1.Duration
+}
+
+// KubernetesConfig contains common configuration fields for the control plane components.
+type KubernetesConfig struct {
+ // FeatureGates contains information about enabled feature gates.
+ FeatureGates map[string]bool
+}
+
+// KubeAPIServerConfig contains configuration settings for the kube-apiserver.
+type KubeAPIServerConfig struct {
+ KubernetesConfig
+ // AdmissionPlugins contains the list of user-defined admission plugins (additional to those managed by Gardener), and, if desired, the corresponding
+ // configuration.
+ AdmissionPlugins []AdmissionPlugin
+ // APIAudiences are the identifiers of the API. The service account token authenticator will
+ // validate that tokens used against the API are bound to at least one of these audiences.
+ // Defaults to ["kubernetes"].
+ APIAudiences []string
+ // AuditConfig contains configuration settings for the audit of the kube-apiserver.
+ AuditConfig *AuditConfig
+ // EnableBasicAuthentication defines whether basic authentication should be enabled for this cluster or not.
+ EnableBasicAuthentication *bool
+ // OIDCConfig contains configuration settings for the OIDC provider.
+ OIDCConfig *OIDCConfig
+ // RuntimeConfig contains information about enabled or disabled APIs.
+ RuntimeConfig map[string]bool
+ // ServiceAccountConfig contains configuration settings for the service account handling
+ // of the kube-apiserver.
+ ServiceAccountConfig *ServiceAccountConfig
+ // WatchCacheSizes contains configuration of the API server's watch cache sizes.
+ // Configuring these flags might be useful for large-scale Shoot clusters with a lot of parallel update requests
+ // and a lot of watching controllers (e.g. large shooted Seed clusters). When the API server's watch cache's
+ // capacity is too small to cope with the amount of update requests and watchers for a particular resource, it
+ // might happen that controller watches are permanently stopped with `too old resource version` errors.
+ // Starting from kubernetes v1.19, the API server's watch cache size is adapted dynamically and setting the watch
+ // cache size flags will have no effect, except when setting it to 0 (which disables the watch cache).
+ WatchCacheSizes *WatchCacheSizes
+ // Requests contains configuration for request-specific settings for the kube-apiserver.
+ Requests *KubeAPIServerRequests
+}
+
+// KubeAPIServerRequests contains configuration for request-specific settings for the kube-apiserver.
+type KubeAPIServerRequests struct {
+ // MaxNonMutatingInflight is the maximum number of non-mutating requests in flight at a given time. When the server
+ // exceeds this, it rejects requests.
+ MaxNonMutatingInflight *int32
+ // MaxMutatingInflight is the maximum number of mutating requests in flight at a given time. When the server
+ // exceeds this, it rejects requests.
+ MaxMutatingInflight *int32
+}
+
+// ServiceAccountConfig is the kube-apiserver configuration for service accounts.
+type ServiceAccountConfig struct {
+ // Issuer is the identifier of the service account token issuer. The issuer will assert this
+ // identifier in "iss" claim of issued tokens. This value is a string or URI.
+ // Defaults to URI of the API server.
+ Issuer *string
+ // SigningKeySecret is a reference to a secret that contains an optional private key of the
+ // service account token issuer. The issuer will sign issued ID tokens with this private key.
+ // Only useful if service account tokens are also issued by another external system.
+ SigningKeySecret *corev1.LocalObjectReference
+}
+
+// AuditConfig contains settings for the audit of the kube-apiserver.
+type AuditConfig struct {
+ // AuditPolicy contains configuration settings for audit policy of the kube-apiserver.
+ AuditPolicy *AuditPolicy
+}
+
+// AuditPolicy contains the audit policy for the kube-apiserver.
+type AuditPolicy struct {
+ // ConfigMapRef is a reference to a ConfigMap object in the same namespace,
+ // which contains the audit policy for the kube-apiserver.
+ ConfigMapRef *corev1.ObjectReference
+}
+
+// OIDCConfig contains configuration settings for the OIDC provider.
+// Note: Descriptions were taken from the Kubernetes documentation.
+type OIDCConfig struct {
+ // If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.
+ CABundle *string
+ // ClientAuthentication can optionally contain client configuration used for kubeconfig generation.
+ ClientAuthentication *OpenIDConnectClientAuthentication
+ // The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.
+ ClientID *string
+ // If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details.
+ GroupsClaim *string
+ // If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.
+ GroupsPrefix *string
+ // The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).
+ IssuerURL *string
+ // ATTENTION: Only meaningful for Kubernetes >= 1.11
+ // key=value pairs that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value.
+ RequiredClaims map[string]string
+ // List of allowed JOSE asymmetric signing algorithms. JWTs with an 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1
+ SigningAlgs []string
+ // The OpenID claim to use as the user name. Note that claims other than the default ('sub') are not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub")
+ UsernameClaim *string
+ // If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'.
+ UsernamePrefix *string
+}
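+
+// Illustrative, non-authoritative sketch of an OIDC configuration; the issuer URL, client ID,
+// and claim names are assumptions:
+//
+//    issuerURL := "https://auth.example.com"
+//    clientID := "shoot-dashboard"
+//    usernameClaim := "email"
+//    groupsClaim := "groups"
+//    oidc := OIDCConfig{
+//        IssuerURL:     &issuerURL,
+//        ClientID:      &clientID,
+//        UsernameClaim: &usernameClaim,
+//        GroupsClaim:   &groupsClaim,
+//    }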
+
+// OpenIDConnectClientAuthentication contains configuration for OIDC clients.
+type OpenIDConnectClientAuthentication struct {
+ // Extra configuration added to kubeconfig's auth-provider.
+ // Must not be any of idp-issuer-url, client-id, client-secret, idp-certificate-authority, idp-certificate-authority-data, id-token or refresh-token
+ ExtraConfig map[string]string
+ // The client Secret for the OpenID Connect client.
+ Secret *string
+}
+
+// AdmissionPlugin contains information about a specific admission plugin and its corresponding configuration.
+type AdmissionPlugin struct {
+ // Name is the name of the plugin.
+ Name string
+ // Config is the configuration of the plugin.
+ Config *runtime.RawExtension
+}
+
+// WatchCacheSizes contains configuration of the API server's watch cache sizes.
+type WatchCacheSizes struct {
+ // Default configures the default watch cache size of the kube-apiserver
+ // (flag `--default-watch-cache-size`, defaults to 100).
+ // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+ Default *int32
+ // Resources configures the watch cache size of the kube-apiserver per resource
+ // (flag `--watch-cache-sizes`).
+ // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+ Resources []ResourceWatchCacheSize
+}
+
+// ResourceWatchCacheSize contains configuration of the API server's watch cache size for one specific resource.
+type ResourceWatchCacheSize struct {
+ // APIGroup is the API group of the resource for which the watch cache size should be configured.
+ // An unset value is used to specify the legacy core API (e.g. for `secrets`).
+ APIGroup *string
+ // Resource is the name of the resource for which the watch cache size should be configured
+ // (in lowercase plural form, e.g. `secrets`).
+ Resource string
+ // CacheSize specifies the watch cache size that should be configured for the specified resource.
+ CacheSize int32
+}
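+
+// Illustrative, non-authoritative sketch of a watch cache configuration that raises the default
+// cache size and the cache size for secrets (core API group, so APIGroup stays unset); the
+// concrete numbers are assumptions:
+//
+//    defaultSize := int32(200)
+//    watchCaches := WatchCacheSizes{
+//        Default: &defaultSize,
+//        Resources: []ResourceWatchCacheSize{
+//            {Resource: "secrets", CacheSize: 500},
+//        },
+//    }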
+
+// KubeControllerManagerConfig contains configuration settings for the kube-controller-manager.
+type KubeControllerManagerConfig struct {
+ KubernetesConfig
+ // HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.
+ HorizontalPodAutoscalerConfig *HorizontalPodAutoscalerConfig
+ // NodeCIDRMaskSize defines the mask size for the node CIDR in the cluster (default: 24).
+ NodeCIDRMaskSize *int32
+ // PodEvictionTimeout defines the grace period for deleting pods on failed nodes.
+ PodEvictionTimeout *metav1.Duration
+}
+
+// HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.
+// Note: Descriptions were taken from the Kubernetes documentation.
+type HorizontalPodAutoscalerConfig struct {
+ // The period after which a ready pod transition is considered to be the first.
+ CPUInitializationPeriod *metav1.Duration
+ // The period since last downscale, before another downscale can be performed in horizontal pod autoscaler.
+ DownscaleDelay *metav1.Duration
+ // The configurable window at which the controller will choose the highest recommendation for autoscaling.
+ DownscaleStabilization *metav1.Duration
+ // The configurable period at which the horizontal pod autoscaler considers a Pod “not yet ready” given that it’s unready and it has transitioned to unready during that time.
+ InitialReadinessDelay *metav1.Duration
+ // The period for syncing the number of pods in horizontal pod autoscaler.
+ SyncPeriod *metav1.Duration
+ // The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.
+ Tolerance *float64
+ // The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.
+ UpscaleDelay *metav1.Duration
+}
+
+const (
+ // DefaultHPADownscaleDelay is a constant for the default HPA downscale delay for a Shoot cluster.
+ DefaultHPADownscaleDelay = 15 * time.Minute
+ // DefaultHPASyncPeriod is a constant for the default HPA sync period for a Shoot cluster.
+ DefaultHPASyncPeriod = 30 * time.Second
+ // DefaultHPATolerance is a constant for the default HPA tolerance for a Shoot cluster.
+ DefaultHPATolerance = 0.1
+ // DefaultHPAUpscaleDelay is a constant for the default HPA upscale delay for a Shoot cluster.
+ DefaultHPAUpscaleDelay = 1 * time.Minute
+ // DefaultDownscaleStabilization is a constant for the default HPA downscale stabilization window for a Shoot cluster.
+ DefaultDownscaleStabilization = 5 * time.Minute
+ // DefaultInitialReadinessDelay is a constant for the default HPA initial readiness delay for a Shoot cluster.
+ DefaultInitialReadinessDelay = 30 * time.Second
+ // DefaultCPUInitializationPeriod is a constant for the default value of the CPUInitializationPeriod for a Shoot cluster.
+ DefaultCPUInitializationPeriod = 5 * time.Minute
+)
+
+// KubeSchedulerConfig contains configuration settings for the kube-scheduler.
+type KubeSchedulerConfig struct {
+ KubernetesConfig
+ // KubeMaxPDVols allows to configure the `KUBE_MAX_PD_VOLS` environment variable for the kube-scheduler.
+ // Please find more information here: https://kubernetes.io/docs/concepts/storage/storage-limits/#custom-limits
+ // Note that using this field is considered alpha-/experimental-level and is at your own risk. You should be aware
+ // of all the side-effects and consequences when changing it.
+ KubeMaxPDVols *string
+}
+
+// KubeProxyConfig contains configuration settings for the kube-proxy.
+type KubeProxyConfig struct {
+ KubernetesConfig
+ // Mode specifies which proxy mode to use (defaults to IPTables).
+ Mode *ProxyMode
+}
+
+// ProxyMode is the proxy mode used by kube-proxy on the Linux platform: 'userspace' (older, going to be EOL), 'iptables'
+// (newer, faster), 'ipvs' (newest, better in performance and scalability).
+// As of now, only 'iptables' and 'ipvs' are supported by Gardener.
+// On Linux, if the iptables proxy is selected but the system's kernel or iptables versions are insufficient,
+// kube-proxy always falls back to the userspace proxy. IPVS mode will be enabled when the proxy mode is set to 'ipvs';
+// its fallback path is first iptables and then userspace.
+type ProxyMode string
+
+const (
+ // ProxyModeIPTables uses iptables as proxy implementation.
+ ProxyModeIPTables ProxyMode = "IPTables"
+ // ProxyModeIPVS uses ipvs as proxy implementation.
+ ProxyModeIPVS ProxyMode = "IPVS"
+)
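+
+// Illustrative, non-authoritative sketch of a kube-proxy configuration that selects IPVS mode:
+//
+//    mode := ProxyModeIPVS
+//    kubeProxy := KubeProxyConfig{Mode: &mode}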
+
+// KubeletConfig contains configuration settings for the kubelet.
+type KubeletConfig struct {
+ KubernetesConfig
+ // CPUCFSQuota allows you to disable/enable CPU throttling for Pods.
+ CPUCFSQuota *bool
+ // CPUManagerPolicy allows to set alternative CPU management policies (default: none).
+ CPUManagerPolicy *string
+ // EvictionHard describes a set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a Pod eviction.
+ // Default:
+ // memory.available: "100Mi/1Gi/5%"
+ // nodefs.available: "5%"
+ // nodefs.inodesFree: "5%"
+ // imagefs.available: "5%"
+ // imagefs.inodesFree: "5%"
+ EvictionHard *KubeletConfigEviction
+ // EvictionMaxPodGracePeriod describes the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ // Default: 90
+ EvictionMaxPodGracePeriod *int32
+ // EvictionMinimumReclaim configures the amount of resources below the configured eviction threshold that the kubelet attempts to reclaim whenever the kubelet observes resource pressure.
+ // Default: 0 for each resource
+ EvictionMinimumReclaim *KubeletConfigEvictionMinimumReclaim
+ // EvictionPressureTransitionPeriod is the duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.
+ // Default: 4m0s
+ EvictionPressureTransitionPeriod *metav1.Duration
+ // EvictionSoft describes a set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a Pod eviction.
+ // Default:
+ // memory.available: "200Mi/1.5Gi/10%"
+ // nodefs.available: "10%"
+ // nodefs.inodesFree: "10%"
+ // imagefs.available: "10%"
+ // imagefs.inodesFree: "10%"
+ EvictionSoft *KubeletConfigEviction
+ // EvictionSoftGracePeriod describes a set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a Pod eviction.
+ // Default:
+ // memory.available: 1m30s
+ // nodefs.available: 1m30s
+ // nodefs.inodesFree: 1m30s
+ // imagefs.available: 1m30s
+ // imagefs.inodesFree: 1m30s
+ EvictionSoftGracePeriod *KubeletConfigEvictionSoftGracePeriod
+ // MaxPods is the maximum number of Pods that are allowed by the Kubelet.
+ // Default: 110
+ MaxPods *int32
+ // PodPIDsLimit is the maximum number of process IDs per pod allowed by the kubelet.
+ PodPIDsLimit *int64
+ // ImagePullProgressDeadline describes the time limit during which, if no pulling progress is made, the image pull will be cancelled.
+ // Default: 1m
+ ImagePullProgressDeadline *metav1.Duration
+ // FailSwapOn makes the Kubelet fail to start if swap is enabled on the node. (default true).
+ FailSwapOn *bool
+ // KubeReserved is the configuration for resources reserved for kubernetes node components (mainly kubelet and container runtime).
+ // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied.
+ // Default: cpu=80m,memory=1Gi,pid=20k
+ KubeReserved *KubeletConfigReserved
+ // SystemReserved is the configuration for resources reserved for system processes not managed by kubernetes (e.g. journald).
+ // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied.
+ SystemReserved *KubeletConfigReserved
+}
+
+// KubeletConfigEviction contains kubelet eviction thresholds supporting either a resource.Quantity or a percentage based value.
+type KubeletConfigEviction struct {
+ // MemoryAvailable is the threshold for the free memory on the host server.
+ MemoryAvailable *string
+ // ImageFSAvailable is the threshold for the free disk space in the imagefs filesystem (docker images and container writable layers).
+ ImageFSAvailable *string
+ // ImageFSInodesFree is the threshold for the available inodes in the imagefs filesystem.
+ ImageFSInodesFree *string
+ // NodeFSAvailable is the threshold for the free disk space in the nodefs filesystem (docker volumes, logs, etc).
+ NodeFSAvailable *string
+ // NodeFSInodesFree is the threshold for the available inodes in the nodefs filesystem.
+ NodeFSInodesFree *string
+}
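+
+// Illustrative, non-authoritative sketch of hard eviction thresholds mixing an absolute value
+// and percentage-based values; the concrete thresholds are assumptions:
+//
+//    memoryAvailable := "200Mi"
+//    nodeFSAvailable := "10%"
+//    evictionHard := KubeletConfigEviction{
+//        MemoryAvailable: &memoryAvailable,
+//        NodeFSAvailable: &nodeFSAvailable,
+//    }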
+
+// KubeletConfigEvictionMinimumReclaim contains configuration for the kubelet eviction minimum reclaim.
+type KubeletConfigEvictionMinimumReclaim struct {
+ // MemoryAvailable is the threshold for the memory reclaim on the host server.
+ MemoryAvailable *resource.Quantity
+ // ImageFSAvailable is the threshold for the disk space reclaim in the imagefs filesystem (docker images and container writable layers).
+ ImageFSAvailable *resource.Quantity
+ // ImageFSInodesFree is the threshold for the inodes reclaim in the imagefs filesystem.
+ ImageFSInodesFree *resource.Quantity
+ // NodeFSAvailable is the threshold for the disk space reclaim in the nodefs filesystem (docker volumes, logs, etc).
+ NodeFSAvailable *resource.Quantity
+ // NodeFSInodesFree is the threshold for the inodes reclaim in the nodefs filesystem.
+ NodeFSInodesFree *resource.Quantity
+}
+
+// KubeletConfigEvictionSoftGracePeriod contains grace periods for kubelet eviction thresholds.
+type KubeletConfigEvictionSoftGracePeriod struct {
+ // MemoryAvailable is the grace period for the MemoryAvailable eviction threshold.
+ MemoryAvailable *metav1.Duration
+ // ImageFSAvailable is the grace period for the ImageFSAvailable eviction threshold.
+ ImageFSAvailable *metav1.Duration
+ // ImageFSInodesFree is the grace period for the ImageFSInodesFree eviction threshold.
+ ImageFSInodesFree *metav1.Duration
+ // NodeFSAvailable is the grace period for the NodeFSAvailable eviction threshold.
+ NodeFSAvailable *metav1.Duration
+ // NodeFSInodesFree is the grace period for the NodeFSInodesFree eviction threshold.
+ NodeFSInodesFree *metav1.Duration
+}
+
+// KubeletConfigReserved contains reserved resources for daemons
+type KubeletConfigReserved struct {
+ // CPU is the reserved cpu.
+ CPU *resource.Quantity
+ // Memory is the reserved memory.
+ Memory *resource.Quantity
+ // EphemeralStorage is the reserved ephemeral-storage.
+ EphemeralStorage *resource.Quantity
+ // PID is the reserved process-ids.
+ // To reserve PID, the SupportNodePidsLimit feature gate must be enabled in Kubernetes versions < 1.15.
+ PID *resource.Quantity
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Networking relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Networking defines networking parameters for the shoot cluster.
+type Networking struct {
+ // Type identifies the type of the networking plugin.
+ Type string
+ // ProviderConfig is the configuration passed to network resource.
+ ProviderConfig *runtime.RawExtension
+ // Pods is the CIDR of the pod network.
+ Pods *string
+ // Nodes is the CIDR of the entire node network.
+ Nodes *string
+ // Services is the CIDR of the service network.
+ Services *string
+}
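+
+// Illustrative, non-authoritative sketch of a networking section; the plugin type and node CIDR
+// are assumptions, while the pod and service CIDRs reuse the defaults declared below:
+//
+//    pods := DefaultPodNetworkCIDR
+//    services := DefaultServiceNetworkCIDR
+//    nodes := "10.250.0.0/16"
+//    networking := Networking{
+//        Type:     "calico",
+//        Pods:     &pods,
+//        Nodes:    &nodes,
+//        Services: &services,
+//    }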
+
+const (
+ // DefaultPodNetworkCIDR is a constant for the default pod network CIDR of a Shoot cluster.
+ DefaultPodNetworkCIDR = "100.96.0.0/11"
+ // DefaultServiceNetworkCIDR is a constant for the default service network CIDR of a Shoot cluster.
+ DefaultServiceNetworkCIDR = "100.64.0.0/13"
+)
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Maintenance relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+const (
+ // MaintenanceTimeWindowDurationMinimum is the minimum duration for a maintenance time window.
+ MaintenanceTimeWindowDurationMinimum = 30 * time.Minute
+ // MaintenanceTimeWindowDurationMaximum is the maximum duration for a maintenance time window.
+ MaintenanceTimeWindowDurationMaximum = 6 * time.Hour
+)
+
+// Maintenance contains information about the time window for maintenance operations and which
+// operations should be performed.
+type Maintenance struct {
+ // AutoUpdate contains information about which constraints should be automatically updated.
+ AutoUpdate *MaintenanceAutoUpdate
+ // TimeWindow contains information about the time window for maintenance operations.
+ TimeWindow *MaintenanceTimeWindow
+ // ConfineSpecUpdateRollout prevents changes/updates to the shoot specification from being rolled out immediately.
+ // Instead, they are rolled out during the shoot's maintenance time window. There is one exception that will trigger
+ // an immediate rollout: changes to the Spec.Hibernation.Enabled field.
+ ConfineSpecUpdateRollout *bool
+}
+
+// MaintenanceAutoUpdate contains information about which constraints should be automatically updated.
+type MaintenanceAutoUpdate struct {
+ // KubernetesVersion indicates whether the patch Kubernetes version may be automatically updated (default: true).
+ KubernetesVersion bool
+ // MachineImageVersion indicates whether the machine image version may be automatically updated (default: true).
+ MachineImageVersion bool
+}
+
+// MaintenanceTimeWindow contains information about the time window for maintenance operations.
+type MaintenanceTimeWindow struct {
+ // Begin is the beginning of the time window in the format HHMMSS+ZONE, e.g. "220000+0100".
+ // If not present, a random value will be computed.
+ Begin string
+ // End is the end of the time window in the format HHMMSS+ZONE, e.g. "220000+0100".
+ // If not present, the value will be computed based on the "Begin" value.
+ End string
+}
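+
+// Illustrative, non-authoritative sketch of a maintenance section with a nightly one-hour window
+// in the HHMMSS+ZONE format described above; the concrete window is an assumption:
+//
+//    maintenance := Maintenance{
+//        AutoUpdate: &MaintenanceAutoUpdate{KubernetesVersion: true, MachineImageVersion: true},
+//        TimeWindow: &MaintenanceTimeWindow{Begin: "220000+0100", End: "230000+0100"},
+//    }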
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Monitoring relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Monitoring contains information about the monitoring configuration for the shoot.
+type Monitoring struct {
+ // Alerting contains information about the alerting configuration for the shoot cluster.
+ Alerting *Alerting
+}
+
+// Alerting contains information about how alerting will be done (i.e. who will receive alerts and how).
+type Alerting struct {
+ // EmailReceivers is a list of recipients for alerts.
+ EmailReceivers []string
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Provider relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Provider contains provider-specific information that is handed over to the provider-specific
+// extension controller.
+type Provider struct {
+ // Type is the type of the provider.
+ Type string
+ // ControlPlaneConfig contains the provider-specific control plane config blob. Please look up the concrete
+ // definition in the documentation of your provider extension.
+ ControlPlaneConfig *runtime.RawExtension
+ // InfrastructureConfig contains the provider-specific infrastructure config blob. Please look up the concrete
+ // definition in the documentation of your provider extension.
+ InfrastructureConfig *runtime.RawExtension
+ // Workers is a list of worker groups.
+ Workers []Worker
+}
+
+// Worker is the base definition of a worker group.
+type Worker struct {
+ // Annotations is a map of key/value pairs for annotations for all the `Node` objects in this worker pool.
+ Annotations map[string]string
+ // CABundle is a certificate bundle which will be installed onto every machine of this worker pool.
+ CABundle *string
+ // CRI contains configurations of CRI support of every machine in the worker pool
+ CRI *CRI
+ // Kubernetes contains configuration for Kubernetes components related to this worker pool.
+ Kubernetes *WorkerKubernetes
+ // Labels is a map of key/value pairs for labels for all the `Node` objects in this worker pool.
+ Labels map[string]string
+ // Name is the name of the worker group.
+ Name string
+ // Machine contains information about the machine type and image.
+ Machine Machine
+ // Maximum is the maximum number of VMs to create.
+ Maximum int32
+ // Minimum is the minimum number of VMs to create.
+ Minimum int32
+ // MaxSurge is the maximum number of VMs that are created during an update.
+ MaxSurge *intstr.IntOrString
+ // MaxUnavailable is the maximum number of VMs that can be unavailable during an update.
+ MaxUnavailable *intstr.IntOrString
+ // ProviderConfig is the provider-specific configuration for this worker pool.
+ ProviderConfig *runtime.RawExtension
+ // SystemComponents contains configuration for system components related to this worker pool
+ SystemComponents *WorkerSystemComponents
+ // Taints is a list of taints for all the `Node` objects in this worker pool.
+ Taints []corev1.Taint
+ // Volume contains information about the volume type and size.
+ Volume *Volume
+ // DataVolumes contains a list of additional worker volumes.
+ DataVolumes []DataVolume
+ // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state.
+ KubeletDataVolumeName *string
+ // Zones is a list of availability zones that are used to evenly distribute this worker pool. Optional
+ // as not every provider may support availability zones.
+ Zones []string
+ // MachineControllerManagerSettings contains configurations for different worker-pools. Eg. MachineDrainTimeout, MachineHealthTimeout.
+ MachineControllerManagerSettings *MachineControllerManagerSettings
+}
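+
+// Illustrative, non-authoritative sketch of a minimal worker pool; the machine type, volume
+// type/size, and zone names are assumptions:
+//
+//    volumeType := "gp2"
+//    worker := Worker{
+//        Name:           "worker-pool-1",
+//        Machine:        Machine{Type: "m5.large"},
+//        Minimum:        2,
+//        Maximum:        4,
+//        MaxSurge:       &DefaultWorkerMaxSurge,
+//        MaxUnavailable: &DefaultWorkerMaxUnavailable,
+//        Volume:         &Volume{Type: &volumeType, VolumeSize: "50Gi"},
+//        Zones:          []string{"eu-central-1a", "eu-central-1b"},
+//    }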
+
+// MachineControllerManagerSettings contains configurations for different worker-pools. Eg. MachineDrainTimeout, MachineHealthTimeout.
+type MachineControllerManagerSettings struct {
+ // MachineDrainTimeout is the period after which a machine is forcefully deleted.
+ MachineDrainTimeout *metav1.Duration
+ // MachineHealthTimeout is the period after which a machine is declared failed.
+ MachineHealthTimeout *metav1.Duration
+ // MachineCreationTimeout is the period after which creation of the machine is declared failed.
+ MachineCreationTimeout *metav1.Duration
+ // MaxEvictRetries is the number of eviction retries on a pod after which drain is declared failed, and forceful deletion is triggered.
+ MaxEvictRetries *int32
+ // NodeConditions are the set of conditions that, if they remain true for the period of MachineHealthTimeout, cause the machine to be declared failed.
+ NodeConditions []string
+}
+
+// WorkerSystemComponents contains configuration for system components related to this worker pool
+type WorkerSystemComponents struct {
+ // Allow determines whether the pool should be allowed to host system components or not (defaults to true)
+ Allow bool
+}
+
+// WorkerKubernetes contains configuration for Kubernetes components related to this worker pool.
+type WorkerKubernetes struct {
+ // Kubelet contains configuration settings for all kubelets of this worker pool.
+ Kubelet *KubeletConfig
+}
+
+// Machine contains information about the machine type and image.
+type Machine struct {
+ // Type is the machine type of the worker group.
+ Type string
+ // Image holds information about the machine image to use for all nodes of this pool. It will default to the
+ // latest version of the first image stated in the referenced CloudProfile if no value has been provided.
+ Image *ShootMachineImage
+}
+
+// ShootMachineImage defines the name and the version of the shoot's machine image in any environment. Has to be
+// defined in the respective CloudProfile.
+type ShootMachineImage struct {
+ // Name is the name of the image.
+ Name string
+ // ProviderConfig is the shoot's individual configuration passed to an extension resource.
+ ProviderConfig *runtime.RawExtension
+ // Version is the version of the shoot's image.
+ // If version is not provided, it will be defaulted to the latest version from the CloudProfile.
+ Version string
+}
+
+// Volume contains information about the volume type and size.
+type Volume struct {
+ // Name of the volume to make it referenceable.
+ Name *string
+ // Type is the type of the volume.
+ Type *string
+ // VolumeSize is the size of the volume.
+ VolumeSize string
+ // Encrypted determines if the volume should be encrypted.
+ Encrypted *bool
+}
+
+// DataVolume contains information about a data volume.
+type DataVolume struct {
+ // Name of the volume to make it referenceable.
+ Name string
+ // Type is the type of the volume.
+ Type *string
+ // VolumeSize is the size of the volume.
+ VolumeSize string
+ // Encrypted determines if the volume should be encrypted.
+ Encrypted *bool
+}
+
+// CRI contains information about the Container Runtimes.
+type CRI struct {
+ // Name is the name of the CRI library.
+ Name CRIName
+ // ContainerRuntimes is the list of the required container runtimes supported for a worker pool.
+ ContainerRuntimes []ContainerRuntime
+}
+
+// CRIName is a type alias for the CRI name string.
+type CRIName string
+
+const (
+ // CRINameContainerD is a constant for the containerd CRI name.
+ CRINameContainerD CRIName = "containerd"
+)
+
+// ContainerRuntime contains information about worker's available container runtime
+type ContainerRuntime struct {
+ // Type is the type of the Container Runtime.
+ Type string
+ // ProviderConfig is the configuration passed to the ContainerRuntime resource.
+ ProviderConfig *runtime.RawExtension
+}
+
+var (
+ // DefaultWorkerMaxSurge is the default value for Worker MaxSurge.
+ DefaultWorkerMaxSurge = intstr.FromInt(1)
+ // DefaultWorkerMaxUnavailable is the default value for Worker MaxUnavailable.
+ DefaultWorkerMaxUnavailable = intstr.FromInt(0)
+)
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Other/miscellaneous constants and types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+const (
+ // ShootEventImageVersionMaintenance indicates that a maintenance operation regarding the image version has been performed.
+ ShootEventImageVersionMaintenance = "MachineImageVersionMaintenance"
+ // ShootEventK8sVersionMaintenance indicates that a maintenance operation regarding the K8s version has been performed.
+ ShootEventK8sVersionMaintenance = "KubernetesVersionMaintenance"
+ // ShootEventHibernationEnabled indicates that hibernation started.
+ ShootEventHibernationEnabled = "Hibernated"
+ // ShootEventHibernationDisabled indicates that hibernation ended.
+ ShootEventHibernationDisabled = "WokenUp"
+ // ShootEventSchedulingSuccessful indicates that a scheduling decision was taken successfully.
+ ShootEventSchedulingSuccessful = "SchedulingSuccessful"
+ // ShootEventSchedulingFailed indicates that a scheduling decision failed.
+ ShootEventSchedulingFailed = "SchedulingFailed"
+)
+
+const (
+ // ShootAPIServerAvailable is a constant for a condition type indicating that the Shoot cluster's API server is available.
+ ShootAPIServerAvailable ConditionType = "APIServerAvailable"
+ // ShootControlPlaneHealthy is a constant for a condition type indicating the control plane health.
+ ShootControlPlaneHealthy ConditionType = "ControlPlaneHealthy"
+ // ShootEveryNodeReady is a constant for a condition type indicating the node health.
+ ShootEveryNodeReady ConditionType = "EveryNodeReady"
+ // ShootSystemComponentsHealthy is a constant for a condition type indicating the system components health.
+ ShootSystemComponentsHealthy ConditionType = "SystemComponentsHealthy"
+ // ShootHibernationPossible is a constant for a condition type indicating whether the Shoot can be hibernated.
+ ShootHibernationPossible ConditionType = "HibernationPossible"
+ // ShootMaintenancePreconditionsSatisfied is a constant for a condition type indicating whether all preconditions
+ // for a shoot maintenance operation are satisfied.
+ ShootMaintenancePreconditionsSatisfied ConditionType = "MaintenancePreconditionsSatisfied"
+)
+
+// DNSUnmanaged is a constant for the 'unmanaged' DNS provider.
+const DNSUnmanaged string = "unmanaged"
+
+// ShootPurpose is a type alias for string.
+type ShootPurpose string
+
+const (
+ // ShootPurposeEvaluation is a constant for the evaluation purpose.
+ ShootPurposeEvaluation ShootPurpose = "evaluation"
+ // ShootPurposeTesting is a constant for the testing purpose.
+ ShootPurposeTesting ShootPurpose = "testing"
+ // ShootPurposeDevelopment is a constant for the development purpose.
+ ShootPurposeDevelopment ShootPurpose = "development"
+ // ShootPurposeProduction is a constant for the production purpose.
+ ShootPurposeProduction ShootPurpose = "production"
+ // ShootPurposeInfrastructure is a constant for the infrastructure purpose.
+ ShootPurposeInfrastructure ShootPurpose = "infrastructure"
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_shootstate.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shootstate.go
new file mode 100644
index 0000000..55e04ff
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shootstate.go
@@ -0,0 +1,86 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ShootState contains the state of a Shoot cluster required to migrate the Shoot's control plane to a new Seed.
+type ShootState struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // Specification of the ShootState.
+ Spec ShootStateSpec
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ShootStateList is a list of ShootState objects.
+type ShootStateList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of ShootStates.
+ Items []ShootState
+}
+
+// ShootStateSpec is the specification of the ShootState.
+type ShootStateSpec struct {
+ // Gardener holds the data required to generate resources deployed by the gardenlet
+ Gardener []GardenerResourceData
+ // Extensions holds the state of custom resources reconciled by extension controllers in the seed
+ Extensions []ExtensionResourceState
+ // Resources holds the data of resources referred to by extension controller states
+ Resources []ResourceData
+}
+
+// GardenerResourceData holds the data which is used to generate resources, deployed in the Shoot's control plane.
+type GardenerResourceData struct {
+ // Name of the object required to generate resources
+ Name string
+ // Type of the object
+ Type string
+ // Data contains the payload required to generate resources
+ Data runtime.RawExtension
+}
+
+// ExtensionResourceState contains the kind of the extension custom resource and its last observed state in the Shoot's
+// namespace on the Seed cluster.
+type ExtensionResourceState struct {
+ // Kind (type) of the extension custom resource
+ Kind string
+ // Name of the extension custom resource
+ Name *string
+ // Purpose of the extension custom resource
+ Purpose *string
+ // State of the extension resource
+ State *runtime.RawExtension
+ // Resources holds a list of named resource references that can be referred to in the state by their names.
+ Resources []NamedResourceReference
+}
+
+// ResourceData holds the data of a resource referred to by an extension controller state.
+type ResourceData struct {
+ autoscalingv1.CrossVersionObjectReference
+ // Data of the resource
+ Data runtime.RawExtension
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_utils.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_utils.go
new file mode 100644
index 0000000..5315760
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_utils.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Condition holds the information about the state of a resource.
+type Condition struct {
+ // Type of the Shoot condition.
+ Type ConditionType
+ // Status of the condition, one of True, False, Unknown.
+ Status ConditionStatus
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time
+ // Last time the condition was updated.
+ LastUpdateTime metav1.Time
+ // The reason for the condition's last transition.
+ Reason string
+ // A human readable message indicating details about the transition.
+ Message string
+ // Well-defined error codes in case the condition reports a problem.
+ Codes []ErrorCode
+}
+
+// ConditionStatus is the status of a condition.
+type ConditionStatus string
+
+// ConditionType is a string alias.
+type ConditionType string
+
+const (
+ // ConditionAvailable is a condition type for indicating availability.
+ ConditionAvailable ConditionType = "Available"
+
+ // ConditionTrue means a resource is in the condition.
+ ConditionTrue ConditionStatus = "True"
+ // ConditionFalse means a resource is not in the condition.
+ ConditionFalse ConditionStatus = "False"
+ // ConditionUnknown means Gardener can't decide if a resource is in the condition or not.
+ ConditionUnknown ConditionStatus = "Unknown"
+ // ConditionProgressing means the condition was seen true, failed but stayed within a predefined failure threshold.
+ // In the future, we could add other intermediate conditions, e.g. ConditionDegraded.
+ ConditionProgressing ConditionStatus = "Progressing"
+
+ // ConditionCheckError is a constant for a reason in condition.
+ ConditionCheckError = "ConditionCheckError"
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/constants/types_constants.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/constants/types_constants.go
new file mode 100644
index 0000000..ca925c3
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/constants/types_constants.go
@@ -0,0 +1,258 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package constants
+
+const (
+ // SecretNameCACluster is a constant for the name of a Kubernetes secret object that contains the CA
+ // certificate of a shoot cluster.
+ SecretNameCACluster = "ca"
+ // SecretNameCAETCD is a constant for the name of a Kubernetes secret object that contains the CA
+ // certificate of the etcd of a shoot cluster.
+ SecretNameCAETCD = "ca-etcd"
+ // SecretNameCAFrontProxy is a constant for the name of a Kubernetes secret object that contains the CA
+ // certificate of the kube-aggregator of a shoot cluster.
+ SecretNameCAFrontProxy = "ca-front-proxy"
+ // SecretNameCAKubelet is a constant for the name of a Kubernetes secret object that contains the CA
+ // certificate of the kubelet of a shoot cluster.
+ SecretNameCAKubelet = "ca-kubelet"
+ // SecretNameCAMetricsServer is a constant for the name of a Kubernetes secret object that contains the CA
+ // certificate of the metrics-server of a shoot cluster.
+ SecretNameCAMetricsServer = "ca-metrics-server"
+ // SecretNameCloudProvider is a constant for the name of a Kubernetes secret object that contains the provider
+ // specific credentials that shall be used to create/delete the shoot.
+ SecretNameCloudProvider = "cloudprovider"
+ // SecretNameSSHKeyPair is a constant for the name of a Kubernetes secret object that contains the SSH key pair
+ // (public and private key) that can be used to SSH into the shoot nodes.
+ SecretNameSSHKeyPair = "ssh-keypair"
+
+ // SecretNameGardener is a constant for the name of a Kubernetes secret object that contains the client
+ // certificate and a kubeconfig for a shoot cluster. It is used by Gardener and can be used by extension
+ // controllers in order to communicate with the shoot's API server. The client certificate has administrator
+ // privileges.
+ SecretNameGardener = "gardener"
+
+ // DeploymentNameClusterAutoscaler is a constant for the name of a Kubernetes deployment object that contains
+ // the cluster-autoscaler pod.
+ DeploymentNameClusterAutoscaler = "cluster-autoscaler"
+ // DeploymentNameKubeAPIServer is a constant for the name of a Kubernetes deployment object that contains
+ // the kube-apiserver pod.
+ DeploymentNameKubeAPIServer = "kube-apiserver"
+ // DeploymentNameKubeControllerManager is a constant for the name of a Kubernetes deployment object that contains
+ // the kube-controller-manager pod.
+ DeploymentNameKubeControllerManager = "kube-controller-manager"
+
+ // DeploymentNameKubeScheduler is a constant for the name of a Kubernetes deployment object that contains
+ // the kube-scheduler pod.
+ DeploymentNameKubeScheduler = "kube-scheduler"
+ // DeploymentNameGardenerResourceManager is a constant for the name of a Kubernetes deployment object that contains
+ // the gardener-resource-manager pod.
+ DeploymentNameGardenerResourceManager = "gardener-resource-manager"
+ // DeploymentNameGrafanaOperators is a constant for the name of a Kubernetes deployment object that contains
+ // the grafana-operators pod.
+ DeploymentNameGrafanaOperators = "grafana-operators"
+ // DeploymentNameGrafanaUsers is a constant for the name of a Kubernetes deployment object that contains
+ // the grafana-users pod.
+ DeploymentNameGrafanaUsers = "grafana-users"
+ // DeploymentNameKubeStateMetricsShoot is a constant for the name of a Kubernetes deployment object that contains
+ // the kube-state-metrics pod.
+ DeploymentNameKubeStateMetricsShoot = "kube-state-metrics"
+
+ // StatefulSetNameAlertManager is a constant for the name of a Kubernetes stateful set object that contains
+ // the alertmanager pod.
+ StatefulSetNameAlertManager = "alertmanager"
+ // ETCDRoleMain is a constant for the main etcd role.
+ ETCDRoleMain = "main"
+ // ETCDRoleEvents is a constant for the events etcd role.
+ ETCDRoleEvents = "events"
+ // ETCDMain is a constant for the name of etcd-main Etcd object.
+ ETCDMain = "etcd-" + ETCDRoleMain
+ // ETCDEvents is a constant for the name of etcd-events Etcd object.
+ ETCDEvents = "etcd-" + ETCDRoleEvents
+
+ // StatefulSetNamePrometheus is a constant for the name of a Kubernetes stateful set object that contains
+ // the prometheus pod.
+ StatefulSetNamePrometheus = "prometheus"
+
+ // GardenerPurpose is a constant for the key in a label describing the purpose of the respective object.
+ GardenerPurpose = "gardener.cloud/purpose"
+
+ // GardenerOperation is a constant for an annotation on a resource that describes a desired operation.
+ GardenerOperation = "gardener.cloud/operation"
+ // GardenerOperationReconcile is a constant for the value of the operation annotation describing a reconcile
+ // operation.
+ GardenerOperationReconcile = "reconcile"
+ // GardenerOperationMigrate is a constant for the value of the operation annotation describing a migration
+ // operation.
+ GardenerOperationMigrate = "migrate"
+ // GardenerOperationRestore is a constant for the value of the operation annotation describing a restoration
+ // operation.
+ GardenerOperationRestore = "restore"
+ // GardenerOperationWaitForState is a constant for the value of the operation annotation describing a wait
+ // operation.
+ GardenerOperationWaitForState = "wait-for-state"
+
+ // DeprecatedGardenRole is the key for an annotation on a Kubernetes object indicating what it is used for.
+ //
+ // Deprecated: Use `GardenRole` instead.
+ DeprecatedGardenRole = "garden.sapcloud.io/role"
+ // GardenRole is a constant for a label that describes a role.
+ GardenRole = "gardener.cloud/role"
+ // GardenRoleExtension is a constant for a label that describes the 'extensions' role.
+ GardenRoleExtension = "extension"
+ // GardenRoleSeed is the value of the GardenRole key indicating type 'seed'.
+ GardenRoleSeed = "seed"
+ // GardenRoleShoot is the value of the GardenRole key indicating type 'shoot'.
+ GardenRoleShoot = "shoot"
+ // GardenRoleLogging is the value of the GardenRole key indicating type 'logging'.
+ GardenRoleLogging = "logging"
+ // GardenRoleProject is the value of GardenRole key indicating type 'project'.
+ GardenRoleProject = "project"
+ // GardenRoleControlPlane is the value of the GardenRole key indicating type 'controlplane'.
+ GardenRoleControlPlane = "controlplane"
+ // GardenRoleSystemComponent is the value of the GardenRole key indicating type 'system-component'.
+ GardenRoleSystemComponent = "system-component"
+ // GardenRoleMonitoring is the value of the GardenRole key indicating type 'monitoring'.
+ GardenRoleMonitoring = "monitoring"
+ // GardenRoleOptionalAddon is the value of the GardenRole key indicating type 'optional-addon'.
+ GardenRoleOptionalAddon = "optional-addon"
+
+ // DeprecatedShootUID is an annotation key for the shoot namespace in the seed cluster
+ // whose value is set to the value of `shoot.status.uid`.
+ //
+ // Deprecated: Use the `Cluster` resource or the `ShootUID` annotation key from the new API group instead.
+ DeprecatedShootUID = "shoot.garden.sapcloud.io/uid"
+ // ShootUID is an annotation key for the shoot namespace in the seed cluster
+ // whose value is set to the value of `shoot.status.uid`.
+ ShootUID = "shoot.gardener.cloud/uid"
+
+ // SeedResourceManagerClass is the resource-class managed by the Gardener-Resource-Manager
+ // instance in the garden namespace on the seeds.
+ SeedResourceManagerClass = "seed"
+ // LabelBackupProvider is used to identify the backup provider.
+ LabelBackupProvider = "backup.gardener.cloud/provider"
+ // LabelSeedProvider is used to identify the seed provider.
+ LabelSeedProvider = "seed.gardener.cloud/provider"
+ // LabelShootProvider is used to identify the shoot provider.
+ LabelShootProvider = "shoot.gardener.cloud/provider"
+ // LabelNetworkingProvider is used to identify the networking provider for the CNI plugin.
+ LabelNetworkingProvider = "networking.shoot.gardener.cloud/provider"
+ // LabelExtensionConfiguration is used to identify the provider's configuration which will be added to the Gardener configuration.
+ LabelExtensionConfiguration = "extensions.gardener.cloud/configuration"
+ // LabelLogging is a constant for a label for logging stack configurations.
+ LabelLogging = "logging"
+ // LabelMonitoring is a constant for a label for monitoring stack configurations.
+ LabelMonitoring = "monitoring"
+
+ // LabelNetworkPolicyToBlockedCIDRs allows Egress from pods labeled with 'networking.gardener.cloud/to-blocked-cidrs=allowed'.
+ LabelNetworkPolicyToBlockedCIDRs = "networking.gardener.cloud/to-blocked-cidrs"
+ // LabelNetworkPolicyToDNS allows Egress from pods labeled with 'networking.gardener.cloud/to-dns=allowed' to DNS running in 'kube-system'.
+ // In practice, most of the Pods which require network Egress need this label.
+ LabelNetworkPolicyToDNS = "networking.gardener.cloud/to-dns"
+ // LabelNetworkPolicyToPrivateNetworks allows Egress from pods labeled with 'networking.gardener.cloud/to-private-networks=allowed' to the
+ // private networks (RFC1918) and carrier-grade NAT (RFC6598), except for the cloud provider's specific metadata service IP, seed networks,
+ // and shoot networks.
+ LabelNetworkPolicyToPrivateNetworks = "networking.gardener.cloud/to-private-networks"
+ // LabelNetworkPolicyToPublicNetworks allows Egress from pods labeled with 'networking.gardener.cloud/to-public-networks=allowed' to all public
+ // network IPs, except for private networks (RFC1918), carrier-grade NAT (RFC6598), and the cloud provider's specific metadata service IP.
+ // In practice, this blocks Egress traffic to all networks in the Seed cluster and allows only traffic to public IPv4 addresses.
+ LabelNetworkPolicyToPublicNetworks = "networking.gardener.cloud/to-public-networks"
+ // LabelNetworkPolicyToSeedAPIServer allows Egress from pods labeled with 'networking.gardener.cloud/to-seed-apiserver=allowed' to Seed's Kubernetes
+ // API Server.
+ LabelNetworkPolicyToSeedAPIServer = "networking.gardener.cloud/to-seed-apiserver"
+ // LabelNetworkPolicyToShootAPIServer allows Egress from pods labeled with 'networking.gardener.cloud/to-shoot-apiserver=allowed' to talk to Shoot's
+ // Kubernetes API Server.
+ LabelNetworkPolicyToShootAPIServer = "networking.gardener.cloud/to-shoot-apiserver"
+ // LabelNetworkPolicyToAll disables all Ingress and Egress traffic into/from this namespace when set to "disallowed".
+ LabelNetworkPolicyToAll = "networking.gardener.cloud/to-all"
+ // LabelNetworkPolicyFromPrometheus allows Ingress from Prometheus to pods labeled with 'networking.gardener.cloud/from-prometheus=allowed' and ports
+ // named 'metrics' in the PodSpecification.
+ LabelNetworkPolicyFromPrometheus = "networking.gardener.cloud/from-prometheus"
+ // LabelNetworkPolicyAllowed is a constant for allowing a network policy.
+ LabelNetworkPolicyAllowed = "allowed"
+ // LabelNetworkPolicyDisallowed is a constant for disallowing a network policy.
+ LabelNetworkPolicyDisallowed = "disallowed"
+
+ // LabelApp is a constant for a label key.
+ LabelApp = "app"
+ // LabelRole is a constant for a label key.
+ LabelRole = "role"
+ // LabelKubernetes is a constant for a label for Kubernetes workload.
+ LabelKubernetes = "kubernetes"
+ // LabelAPIServer is a constant for a label for the kube-apiserver.
+ LabelAPIServer = "apiserver"
+ // LabelControllerManager is a constant for a label for the kube-controller-manager.
+ LabelControllerManager = "controller-manager"
+ // LabelScheduler is a constant for a label for the kube-scheduler.
+ LabelScheduler = "scheduler"
+ // LabelExtensionProjectRole is a constant for a label value for extension project roles
+ LabelExtensionProjectRole = "extension-project-role"
+
+ // LabelAPIServerExposure is a constant for a label key which gardener can add to various objects related
+ // to kube-apiserver exposure.
+ LabelAPIServerExposure = "core.gardener.cloud/apiserver-exposure"
+ // LabelAPIServerExposureGardenerManaged is a constant for a label value which gardener sets on the label key
+ // "core.gardener.cloud/apiserver-exposure" to indicate that it's responsible for apiserver exposure (via SNI).
+ LabelAPIServerExposureGardenerManaged = "gardener-managed"
+
+ // LabelWorkerPoolSystemComponents is a constant that indicates whether the worker pool should host system components
+ LabelWorkerPoolSystemComponents = "worker.gardener.cloud/system-components"
+
+ // GardenNamespace is the namespace in which the configuration and secrets for
+ // the Gardener controller manager will be stored (e.g., secrets for the Seed clusters).
+ // It is also used by the gardener-apiserver.
+ GardenNamespace = "garden"
+
+ // AnnotationShootUseAsSeed is a constant for an annotation on a Shoot resource indicating that the Shoot shall be registered as Seed in the
+ // Garden cluster once successfully created.
+ AnnotationShootUseAsSeed = "shoot.gardener.cloud/use-as-seed"
+ // AnnotationShootIgnoreAlerts is the key for an annotation of a Shoot cluster whose value indicates
+ // whether alerts for this cluster should be ignored.
+ AnnotationShootIgnoreAlerts = "shoot.gardener.cloud/ignore-alerts"
+ // AnnotationShootSkipCleanup is a key for an annotation on a Shoot resource that declares that the clean up steps should be skipped when the
+ // cluster is deleted. Concretely, this will skip everything except the deletion of (load balancer) services and persistent volume resources.
+ AnnotationShootSkipCleanup = "shoot.gardener.cloud/skip-cleanup"
+ // AnnotationShootKonnectivityTunnel is the key for an annotation of a Shoot cluster whose value indicates
+ // whether a konnectivity tunnel should be deployed into the shoot cluster.
+ AnnotationShootKonnectivityTunnel = "alpha.featuregates.shoot.gardener.cloud/konnectivity-tunnel"
+
+ // OperatingSystemConfigUnitNameKubeletService is a constant for a unit in the operating system config that contains the kubelet service.
+ OperatingSystemConfigUnitNameKubeletService = "kubelet.service"
+ // OperatingSystemConfigUnitNameDockerService is a constant for a unit in the operating system config that contains the docker service.
+ OperatingSystemConfigUnitNameDockerService = "docker.service"
+ // OperatingSystemConfigFilePathKernelSettings is a constant for a path to a file in the operating system config that contains some general kernel settings.
+ OperatingSystemConfigFilePathKernelSettings = "/etc/sysctl.d/99-k8s-general.conf"
+ // OperatingSystemConfigFilePathKubeletConfig is a constant for a path to a file in the operating system config that contains the kubelet configuration.
+ OperatingSystemConfigFilePathKubeletConfig = "/var/lib/kubelet/config/kubelet"
+
+ // FluentBitConfigMapKubernetesFilter is a constant for the Fluent Bit ConfigMap's section regarding Kubernetes filters
+ FluentBitConfigMapKubernetesFilter = "filter-kubernetes.conf"
+ // FluentBitConfigMapParser is a constant for the Fluent Bit ConfigMap's section regarding Parsers for common container types
+ FluentBitConfigMapParser = "parsers.conf"
+ // PrometheusConfigMapAlertingRules is a constant for the Prometheus alerting rules tag in provider-specific monitoring configuration
+ PrometheusConfigMapAlertingRules = "alerting_rules"
+ // PrometheusConfigMapScrapeConfig is a constant for the Prometheus scrape config tag in provider-specific monitoring configuration
+ PrometheusConfigMapScrapeConfig = "scrape_config"
+ // GrafanaConfigMapUserDashboard is a constant for the Grafana user dashboard tag in provider-specific monitoring configuration
+ GrafanaConfigMapUserDashboard = "dashboard_users"
+ // GrafanaConfigMapOperatorDashboard is a constant for the Grafana operator dashboard tag in provider-specific monitoring configuration
+ GrafanaConfigMapOperatorDashboard = "dashboard_operators"
+
+ // LabelControllerRegistrationName is the key of a label on extension namespaces that indicates the controller registration name.
+ LabelControllerRegistrationName = "controllerregistration.core.gardener.cloud/name"
+
+ // EventResourceReferenced indicates that a resource deletion is pending because the resource is still
+ // being referenced by at least one other resource (e.g. a SecretBinding is still referenced by a Shoot).
+ EventResourceReferenced = "ResourceReferenced"
+)
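+
+// Illustrative usage (not part of the upstream file): the constants above are plain
+// annotation/label keys and values that controller code combines on Kubernetes objects,
+// for example (assuming an ObjectMeta named `meta` whose maps are already initialized):
+//
+//	meta.Annotations[GardenerOperation] = GardenerOperationReconcile
+//	meta.Labels[LabelNetworkPolicyToDNS] = LabelNetworkPolicyAllowed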
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/conversions.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/conversions.go
new file mode 100644
index 0000000..48dce7b
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/conversions.go
@@ -0,0 +1,410 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ "fmt"
+ "unsafe"
+
+ "github.com/gardener/gardener/pkg/apis/core"
+
+ "k8s.io/apimachinery/pkg/conversion"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func addConversionFuncs(scheme *runtime.Scheme) error {
+ if err := scheme.AddFieldLabelConversionFunc(
+ SchemeGroupVersion.WithKind("ControllerInstallation"),
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name", core.RegistrationRefName, core.SeedRefName:
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ },
+ ); err != nil {
+ return err
+ }
+
+ if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Shoot"),
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name", "metadata.namespace", core.ShootSeedName, core.ShootCloudProfileName, core.ShootStatusSeedName:
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ },
+ ); err != nil {
+ return err
+ }
+
+ // Add non-generated conversion functions
+ if err := scheme.AddConversionFunc((*BackupBucket)(nil), (*core.BackupBucket)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupBucket_To_core_BackupBucket(a.(*BackupBucket), b.(*core.BackupBucket), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*BackupBucketSpec)(nil), (*core.BackupBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(a.(*BackupBucketSpec), b.(*core.BackupBucketSpec), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*BackupEntry)(nil), (*core.BackupEntry)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupEntry_To_core_BackupEntry(a.(*BackupEntry), b.(*core.BackupEntry), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*BackupEntrySpec)(nil), (*core.BackupEntrySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupEntrySpec_To_core_BackupEntrySpec(a.(*BackupEntrySpec), b.(*core.BackupEntrySpec), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*Seed)(nil), (*core.Seed)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Seed_To_core_Seed(a.(*Seed), b.(*core.Seed), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*SeedSpec)(nil), (*core.SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedSpec_To_core_SeedSpec(a.(*SeedSpec), b.(*core.SeedSpec), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*SeedNetworks)(nil), (*core.SeedNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedNetworks_To_core_SeedNetworks(a.(*SeedNetworks), b.(*core.SeedNetworks), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*ShootStatus)(nil), (*core.ShootStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ShootStatus_To_core_ShootStatus(a.(*ShootStatus), b.(*core.ShootStatus), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*core.BackupBucket)(nil), (*BackupBucket)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupBucket_To_v1alpha1_BackupBucket(a.(*core.BackupBucket), b.(*BackupBucket), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*core.BackupBucketSpec)(nil), (*BackupBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupBucketSpec_To_v1alpha1_BackupBucketSpec(a.(*core.BackupBucketSpec), b.(*BackupBucketSpec), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*core.BackupEntry)(nil), (*BackupEntry)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupEntry_To_v1alpha1_BackupEntry(a.(*core.BackupEntry), b.(*BackupEntry), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*core.BackupEntrySpec)(nil), (*BackupEntrySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupEntrySpec_To_v1alpha1_BackupEntrySpec(a.(*core.BackupEntrySpec), b.(*BackupEntrySpec), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*core.Seed)(nil), (*Seed)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Seed_To_v1alpha1_Seed(a.(*core.Seed), b.(*Seed), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*core.SeedSpec)(nil), (*SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSpec_To_v1alpha1_SeedSpec(a.(*core.SeedSpec), b.(*SeedSpec), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*core.SeedNetworks)(nil), (*SeedNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedNetworks_To_v1alpha1_SeedNetworks(a.(*core.SeedNetworks), b.(*SeedNetworks), scope)
+ }); err != nil {
+ return err
+ }
+
+ if err := scheme.AddConversionFunc((*core.ShootStatus)(nil), (*ShootStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootStatus_To_v1alpha1_ShootStatus(a.(*core.ShootStatus), b.(*ShootStatus), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
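+
+// Illustrative note (not part of the upstream file): the field-label conversion funcs
+// registered above determine which field selectors (e.g. `metadata.name`,
+// `metadata.namespace`, or the seed-name field referenced by `core.ShootSeedName`) are
+// accepted on list/watch requests for this API version; any other field label is
+// rejected with the "field label not supported" error returned above.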
+
+func Convert_v1alpha1_BackupBucket_To_core_BackupBucket(in *BackupBucket, out *core.BackupBucket, s conversion.Scope) error {
+ if err := autoConvert_v1alpha1_BackupBucket_To_core_BackupBucket(in, out, s); err != nil {
+ return err
+ }
+
+ out.Spec.SeedName = in.Spec.Seed
+
+ return nil
+}
+
+func Convert_core_BackupBucket_To_v1alpha1_BackupBucket(in *core.BackupBucket, out *BackupBucket, s conversion.Scope) error {
+ if err := autoConvert_core_BackupBucket_To_v1alpha1_BackupBucket(in, out, s); err != nil {
+ return err
+ }
+
+ out.Spec.Seed = in.Spec.SeedName
+
+ return nil
+}
+
+func Convert_core_BackupBucketSpec_To_v1alpha1_BackupBucketSpec(in *core.BackupBucketSpec, out *BackupBucketSpec, s conversion.Scope) error {
+ return autoConvert_core_BackupBucketSpec_To_v1alpha1_BackupBucketSpec(in, out, s)
+}
+
+func Convert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(in *BackupBucketSpec, out *core.BackupBucketSpec, s conversion.Scope) error {
+ return autoConvert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(in, out, s)
+}
+
+func Convert_v1alpha1_BackupEntry_To_core_BackupEntry(in *BackupEntry, out *core.BackupEntry, s conversion.Scope) error {
+ if err := autoConvert_v1alpha1_BackupEntry_To_core_BackupEntry(in, out, s); err != nil {
+ return err
+ }
+
+ out.Spec.SeedName = in.Spec.Seed
+
+ return nil
+}
+
+func Convert_core_BackupEntry_To_v1alpha1_BackupEntry(in *core.BackupEntry, out *BackupEntry, s conversion.Scope) error {
+ if err := autoConvert_core_BackupEntry_To_v1alpha1_BackupEntry(in, out, s); err != nil {
+ return err
+ }
+
+ out.Spec.Seed = in.Spec.SeedName
+
+ return nil
+}
+
+func Convert_core_BackupEntrySpec_To_v1alpha1_BackupEntrySpec(in *core.BackupEntrySpec, out *BackupEntrySpec, s conversion.Scope) error {
+ return autoConvert_core_BackupEntrySpec_To_v1alpha1_BackupEntrySpec(in, out, s)
+}
+
+func Convert_v1alpha1_BackupEntrySpec_To_core_BackupEntrySpec(in *BackupEntrySpec, out *core.BackupEntrySpec, s conversion.Scope) error {
+ return autoConvert_v1alpha1_BackupEntrySpec_To_core_BackupEntrySpec(in, out, s)
+}
+
+func Convert_v1alpha1_Seed_To_core_Seed(in *Seed, out *core.Seed, s conversion.Scope) error {
+ if err := autoConvert_v1alpha1_Seed_To_core_Seed(in, out, s); err != nil {
+ return err
+ }
+
+ out.Spec.Networks.BlockCIDRs = in.Spec.BlockCIDRs
+
+ return nil
+}
+
+func Convert_core_Seed_To_v1alpha1_Seed(in *core.Seed, out *Seed, s conversion.Scope) error {
+ if err := autoConvert_core_Seed_To_v1alpha1_Seed(in, out, s); err != nil {
+ return err
+ }
+
+ out.Spec.BlockCIDRs = in.Spec.Networks.BlockCIDRs
+
+ return nil
+}
+
+func Convert_core_SeedSpec_To_v1alpha1_SeedSpec(in *core.SeedSpec, out *SeedSpec, s conversion.Scope) error {
+ return autoConvert_core_SeedSpec_To_v1alpha1_SeedSpec(in, out, s)
+}
+
+func Convert_v1alpha1_SeedSpec_To_core_SeedSpec(in *SeedSpec, out *core.SeedSpec, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedSpec_To_core_SeedSpec(in, out, s)
+}
+
+func Convert_core_SeedNetworks_To_v1alpha1_SeedNetworks(in *core.SeedNetworks, out *SeedNetworks, s conversion.Scope) error {
+ return autoConvert_core_SeedNetworks_To_v1alpha1_SeedNetworks(in, out, s)
+}
+
+func Convert_v1alpha1_SeedNetworks_To_core_SeedNetworks(in *SeedNetworks, out *core.SeedNetworks, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedNetworks_To_core_SeedNetworks(in, out, s)
+}
+
+func Convert_core_SeedStatus_To_v1alpha1_SeedStatus(in *core.SeedStatus, out *SeedStatus, s conversion.Scope) error {
+ return autoConvert_core_SeedStatus_To_v1alpha1_SeedStatus(in, out, s)
+}
+
+func Convert_core_ShootStatus_To_v1alpha1_ShootStatus(in *core.ShootStatus, out *ShootStatus, s conversion.Scope) error {
+ if err := autoConvert_core_ShootStatus_To_v1alpha1_ShootStatus(in, out, s); err != nil {
+ return err
+ }
+
+ if len(in.LastErrors) != 0 {
+ out.LastError = (*LastError)(unsafe.Pointer(&in.LastErrors[0]))
+ if len(in.LastErrors) > 1 {
+ lastErrors := in.LastErrors[1:]
+ out.LastErrors = *(*[]LastError)(unsafe.Pointer(&lastErrors))
+ } else {
+ out.LastErrors = nil
+ }
+ }
+
+ out.Seed = in.SeedName
+
+ return nil
+}
+
+func Convert_v1alpha1_ShootStatus_To_core_ShootStatus(in *ShootStatus, out *core.ShootStatus, s conversion.Scope) error {
+ if err := autoConvert_v1alpha1_ShootStatus_To_core_ShootStatus(in, out, s); err != nil {
+ return err
+ }
+
+ if in.LastError != nil {
+ outLastErrors := []core.LastError{
+ {
+ Description: in.LastError.Description,
+ Codes: *(*[]core.ErrorCode)(unsafe.Pointer(&in.LastError.Codes)),
+ LastUpdateTime: in.LastError.LastUpdateTime,
+ },
+ }
+ out.LastErrors = append(outLastErrors, *(*[]core.LastError)(unsafe.Pointer(&in.LastErrors))...)
+ } else {
+ out.LastErrors = nil
+ }
+
+ out.SeedName = in.Seed
+
+ return nil
+}
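+
+// Illustrative example (not part of the upstream file) of the LastError handling in the
+// two ShootStatus conversions above, assuming three internal errors e1, e2, e3:
+//
+//	core.ShootStatus{LastErrors: [e1, e2, e3]}
+//	  -> v1alpha1.ShootStatus{LastError: &e1, LastErrors: [e2, e3]}
+//	  -> core.ShootStatus{LastErrors: [e1, e2, e3]}
+//
+// i.e. the first error is split off into the singular v1alpha1 `LastError` field and
+// re-prepended when converting back to the internal version.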
+
+func Convert_v1alpha1_ProjectSpec_To_core_ProjectSpec(in *ProjectSpec, out *core.ProjectSpec, s conversion.Scope) error {
+ if err := autoConvert_v1alpha1_ProjectSpec_To_core_ProjectSpec(in, out, s); err != nil {
+ return err
+ }
+
+ if owner := out.Owner; owner != nil {
+ outer:
+ for i, member := range out.Members {
+ if member.Name == owner.Name && member.APIGroup == owner.APIGroup && member.Kind == owner.Kind {
+ // add owner role to the current project's owner if not present
+ for _, role := range member.Roles {
+ if role == core.ProjectMemberOwner {
+ continue outer
+ }
+ }
+
+ out.Members[i].Roles = append(out.Members[i].Roles, core.ProjectMemberOwner)
+ } else {
+ // delete owner role from all other members
+ out.Members[i].Roles = removeRoleFromRoles(member.Roles, ProjectMemberOwner)
+ }
+ }
+ }
+
+ return nil
+}
+
+func Convert_core_ProjectSpec_To_v1alpha1_ProjectSpec(in *core.ProjectSpec, out *ProjectSpec, s conversion.Scope) error {
+ if err := autoConvert_core_ProjectSpec_To_v1alpha1_ProjectSpec(in, out, s); err != nil {
+ return err
+ }
+
+ if owner := out.Owner; owner != nil {
+ outer:
+ for i, member := range out.Members {
+ if member.Name == owner.Name && member.APIGroup == owner.APIGroup && member.Kind == owner.Kind {
+ // add owner role to the current project's owner if not present
+ if member.Role == core.ProjectMemberOwner {
+ // remove it from owners list if present
+ out.Members[i].Roles = removeRoleFromRoles(member.Roles, ProjectMemberOwner)
+ continue outer
+ }
+ for _, role := range member.Roles {
+ if role == ProjectMemberOwner {
+ continue outer
+ }
+ }
+
+ if out.Members[i].Role == "" {
+ out.Members[i].Role = core.ProjectMemberOwner
+ } else {
+ out.Members[i].Roles = append(out.Members[i].Roles, core.ProjectMemberOwner)
+ }
+ } else {
+ // delete owner role from all other members
+ out.Members[i].Roles = removeRoleFromRoles(member.Roles, ProjectMemberOwner)
+
+ if member.Role == ProjectMemberOwner {
+ if len(out.Members[i].Roles) == 0 {
+ out.Members[i].Role = ""
+ } else {
+ out.Members[i].Role = out.Members[i].Roles[0]
+ if len(out.Members[i].Roles) > 1 {
+ out.Members[i].Roles = out.Members[i].Roles[1:]
+ } else {
+ out.Members[i].Roles = nil
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func Convert_v1alpha1_ProjectMember_To_core_ProjectMember(in *ProjectMember, out *core.ProjectMember, s conversion.Scope) error {
+ if err := autoConvert_v1alpha1_ProjectMember_To_core_ProjectMember(in, out, s); err != nil {
+ return err
+ }
+
+ if len(in.Role) == 0 {
+ return nil
+ }
+
+ // delete in.Role from out.Roles to make sure it gets added to the head
+ if len(out.Roles) > 0 {
+ out.Roles = removeRoleFromRoles(out.Roles, in.Role)
+ }
+
+ // add in.Role to the head of out.Roles
+ out.Roles = append([]string{in.Role}, out.Roles...)
+
+ return nil
+}
+
+func Convert_core_ProjectMember_To_v1alpha1_ProjectMember(in *core.ProjectMember, out *ProjectMember, s conversion.Scope) error {
+ if err := autoConvert_core_ProjectMember_To_v1alpha1_ProjectMember(in, out, s); err != nil {
+ return err
+ }
+
+ if len(in.Roles) > 0 {
+ out.Role = in.Roles[0]
+ out.Roles = in.Roles[1:]
+ }
+
+ return nil
+}
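+
+// Illustrative example (not part of the upstream file) of the Role/Roles handling in the
+// two ProjectMember conversions above:
+//
+//	v1alpha1.ProjectMember{Role: "admin", Roles: ["viewer", "admin"]}
+//	  -> core.ProjectMember{Roles: ["admin", "viewer"]}           // Role moved to the head, duplicate removed
+//	core.ProjectMember{Roles: ["admin", "viewer"]}
+//	  -> v1alpha1.ProjectMember{Role: "admin", Roles: ["viewer"]} // head split back into the singular field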
+
+func removeRoleFromRoles(roles []string, role string) []string {
+ var newRoles []string
+ for _, r := range roles {
+ if r != role {
+ newRoles = append(newRoles, r)
+ }
+ }
+ return newRoles
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/defaults.go
new file mode 100644
index 0000000..349bb6a
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/defaults.go
@@ -0,0 +1,361 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ "math"
+ "time"
+
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ "github.com/gardener/gardener/pkg/utils"
+ versionutils "github.com/gardener/gardener/pkg/utils/version"
+
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/utils/pointer"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+ return RegisterDefaults(scheme)
+}
+
+// SetDefaults_SecretBinding sets default values for SecretBinding objects.
+func SetDefaults_SecretBinding(obj *SecretBinding) {
+ if len(obj.SecretRef.Namespace) == 0 {
+ obj.SecretRef.Namespace = obj.Namespace
+ }
+
+ for i, quota := range obj.Quotas {
+ if len(quota.Namespace) == 0 {
+ obj.Quotas[i].Namespace = obj.Namespace
+ }
+ }
+}
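+
+// Illustrative behaviour (not part of the upstream file): a SecretBinding created in
+// namespace "garden-foo" whose secretRef and quota entries omit a namespace references
+// "garden-foo" for all of them after defaulting.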
+
+// SetDefaults_Project sets default values for Project objects.
+func SetDefaults_Project(obj *Project) {
+ defaultSubject(obj.Spec.Owner)
+
+ for i, member := range obj.Spec.Members {
+ defaultSubject(&obj.Spec.Members[i].Subject)
+
+ if len(member.Role) == 0 && len(member.Roles) == 0 {
+ obj.Spec.Members[i].Role = ProjectMemberViewer
+ }
+ }
+
+ if obj.Spec.Namespace != nil && *obj.Spec.Namespace == v1beta1constants.GardenNamespace {
+ if obj.Spec.Tolerations == nil {
+ obj.Spec.Tolerations = &ProjectTolerations{}
+ }
+ addTolerations(&obj.Spec.Tolerations.Whitelist, Toleration{Key: SeedTaintProtected})
+ addTolerations(&obj.Spec.Tolerations.Defaults, Toleration{Key: SeedTaintProtected})
+ }
+}
+
+func defaultSubject(obj *rbacv1.Subject) {
+ if obj != nil && len(obj.APIGroup) == 0 {
+ switch obj.Kind {
+ case rbacv1.ServiceAccountKind:
+ obj.APIGroup = ""
+ case rbacv1.UserKind:
+ obj.APIGroup = rbacv1.GroupName
+ case rbacv1.GroupKind:
+ obj.APIGroup = rbacv1.GroupName
+ }
+ }
+}
+
+// SetDefaults_MachineType sets default values for MachineType objects.
+func SetDefaults_MachineType(obj *MachineType) {
+ if obj.Usable == nil {
+ trueVar := true
+ obj.Usable = &trueVar
+ }
+}
+
+// SetDefaults_VolumeType sets default values for VolumeType objects.
+func SetDefaults_VolumeType(obj *VolumeType) {
+ if obj.Usable == nil {
+ trueVar := true
+ obj.Usable = &trueVar
+ }
+}
+
+// SetDefaults_Seed sets default values for Seed objects.
+func SetDefaults_Seed(obj *Seed) {
+ if obj.Spec.Settings == nil {
+ obj.Spec.Settings = &SeedSettings{}
+ }
+
+ if obj.Spec.Settings.ExcessCapacityReservation == nil {
+ obj.Spec.Settings.ExcessCapacityReservation = &SeedSettingExcessCapacityReservation{Enabled: true}
+ }
+
+ if obj.Spec.Settings.Scheduling == nil {
+ obj.Spec.Settings.Scheduling = &SeedSettingScheduling{Visible: true}
+ }
+
+ if obj.Spec.Settings.ShootDNS == nil {
+ obj.Spec.Settings.ShootDNS = &SeedSettingShootDNS{Enabled: true}
+ }
+
+ if obj.Spec.Settings.VerticalPodAutoscaler == nil {
+ obj.Spec.Settings.VerticalPodAutoscaler = &SeedSettingVerticalPodAutoscaler{Enabled: true}
+ }
+}
+
+// SetDefaults_Shoot sets default values for Shoot objects.
+func SetDefaults_Shoot(obj *Shoot) {
+ k8sVersionLessThan116, _ := versionutils.CompareVersions(obj.Spec.Kubernetes.Version, "<", "1.16")
+ // Error is ignored here because we cannot do anything meaningful with it.
+ // k8sVersionLessThan116 will default to `false`.
+
+ if obj.Spec.Kubernetes.AllowPrivilegedContainers == nil {
+ obj.Spec.Kubernetes.AllowPrivilegedContainers = pointer.BoolPtr(true)
+ }
+
+ if obj.Spec.Kubernetes.KubeAPIServer == nil {
+ obj.Spec.Kubernetes.KubeAPIServer = &KubeAPIServerConfig{}
+ }
+ if obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication == nil {
+ if k8sVersionLessThan116 {
+ obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication = pointer.BoolPtr(true)
+ } else {
+ obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication = pointer.BoolPtr(false)
+ }
+ }
+ if obj.Spec.Kubernetes.KubeAPIServer.Requests == nil {
+ obj.Spec.Kubernetes.KubeAPIServer.Requests = &KubeAPIServerRequests{}
+ }
+ if obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxNonMutatingInflight == nil {
+ obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxNonMutatingInflight = pointer.Int32Ptr(400)
+ }
+ if obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxMutatingInflight == nil {
+ obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxMutatingInflight = pointer.Int32Ptr(200)
+ }
+
+ if obj.Spec.Kubernetes.KubeControllerManager == nil {
+ obj.Spec.Kubernetes.KubeControllerManager = &KubeControllerManagerConfig{}
+ }
+ if obj.Spec.Kubernetes.KubeControllerManager.NodeCIDRMaskSize == nil {
+ obj.Spec.Kubernetes.KubeControllerManager.NodeCIDRMaskSize = calculateDefaultNodeCIDRMaskSize(obj.Spec.Kubernetes.Kubelet, obj.Spec.Provider.Workers)
+ }
+ if obj.Spec.Kubernetes.KubeControllerManager.PodEvictionTimeout == nil {
+ obj.Spec.Kubernetes.KubeControllerManager.PodEvictionTimeout = &metav1.Duration{Duration: 2 * time.Minute}
+ }
+
+ if obj.Spec.Kubernetes.KubeProxy == nil {
+ obj.Spec.Kubernetes.KubeProxy = &KubeProxyConfig{}
+ }
+ if obj.Spec.Kubernetes.KubeProxy.Mode == nil {
+ defaultProxyMode := ProxyModeIPTables
+ obj.Spec.Kubernetes.KubeProxy.Mode = &defaultProxyMode
+ }
+
+ if obj.Spec.Addons == nil {
+ obj.Spec.Addons = &Addons{}
+ }
+ if obj.Spec.Addons.KubernetesDashboard == nil {
+ obj.Spec.Addons.KubernetesDashboard = &KubernetesDashboard{}
+ }
+ if obj.Spec.Addons.KubernetesDashboard.AuthenticationMode == nil {
+ var defaultAuthMode string
+ if *obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication {
+ defaultAuthMode = KubernetesDashboardAuthModeBasic
+ } else {
+ defaultAuthMode = KubernetesDashboardAuthModeToken
+ }
+ obj.Spec.Addons.KubernetesDashboard.AuthenticationMode = &defaultAuthMode
+ }
+
+ if obj.Spec.Purpose == nil {
+ p := ShootPurposeEvaluation
+ obj.Spec.Purpose = &p
+ }
+
+ // Previous Gardener versions did not support tolerations and were hard-coded to (only) allow shoots in the
+ // `garden` namespace to use seeds that had the 'protected' taint. To stay backwards compatible, now that
+ // tolerations exist, the 'protected' toleration is added to shoots in the garden namespace by default.
+ if obj.Namespace == v1beta1constants.GardenNamespace {
+ addTolerations(&obj.Spec.Tolerations, Toleration{Key: SeedTaintProtected})
+ }
+
+ if obj.Spec.Kubernetes.Kubelet == nil {
+ obj.Spec.Kubernetes.Kubelet = &KubeletConfig{}
+ }
+ if obj.Spec.Kubernetes.Kubelet.FailSwapOn == nil {
+ obj.Spec.Kubernetes.Kubelet.FailSwapOn = pointer.BoolPtr(true)
+ }
+
+ var (
+ kubeReservedMemory = resource.MustParse("1Gi")
+ kubeReservedCPU = resource.MustParse("80m")
+ kubeReservedPID = resource.MustParse("20k")
+
+ k8sVersionGreaterEqual115, _ = versionutils.CompareVersions(obj.Spec.Kubernetes.Version, ">=", "1.15")
+ )
+
+ if obj.Spec.Kubernetes.Kubelet.KubeReserved == nil {
+ obj.Spec.Kubernetes.Kubelet.KubeReserved = &KubeletConfigReserved{Memory: &kubeReservedMemory, CPU: &kubeReservedCPU}
+
+ if k8sVersionGreaterEqual115 {
+ obj.Spec.Kubernetes.Kubelet.KubeReserved.PID = &kubeReservedPID
+ }
+ } else {
+ if obj.Spec.Kubernetes.Kubelet.KubeReserved.Memory == nil {
+ obj.Spec.Kubernetes.Kubelet.KubeReserved.Memory = &kubeReservedMemory
+ }
+ if obj.Spec.Kubernetes.Kubelet.KubeReserved.CPU == nil {
+ obj.Spec.Kubernetes.Kubelet.KubeReserved.CPU = &kubeReservedCPU
+ }
+ if obj.Spec.Kubernetes.Kubelet.KubeReserved.PID == nil && k8sVersionGreaterEqual115 {
+ obj.Spec.Kubernetes.Kubelet.KubeReserved.PID = &kubeReservedPID
+ }
+ }
+
+ if obj.Spec.Maintenance == nil {
+ obj.Spec.Maintenance = &Maintenance{}
+ }
+}
+
+// SetDefaults_Maintenance sets default values for Maintenance objects.
+func SetDefaults_Maintenance(obj *Maintenance) {
+ if obj.AutoUpdate == nil {
+ obj.AutoUpdate = &MaintenanceAutoUpdate{
+ KubernetesVersion: true,
+ MachineImageVersion: true,
+ }
+ }
+
+ if obj.TimeWindow == nil {
+ mt := utils.RandomMaintenanceTimeWindow()
+ obj.TimeWindow = &MaintenanceTimeWindow{
+ Begin: mt.Begin().Formatted(),
+ End: mt.End().Formatted(),
+ }
+ }
+}
+
+// SetDefaults_VerticalPodAutoscaler sets default values for VerticalPodAutoscaler objects.
+func SetDefaults_VerticalPodAutoscaler(obj *VerticalPodAutoscaler) {
+ if obj.EvictAfterOOMThreshold == nil {
+ v := DefaultEvictAfterOOMThreshold
+ obj.EvictAfterOOMThreshold = &v
+ }
+ if obj.EvictionRateBurst == nil {
+ v := DefaultEvictionRateBurst
+ obj.EvictionRateBurst = &v
+ }
+ if obj.EvictionRateLimit == nil {
+ v := DefaultEvictionRateLimit
+ obj.EvictionRateLimit = &v
+ }
+ if obj.EvictionTolerance == nil {
+ v := DefaultEvictionTolerance
+ obj.EvictionTolerance = &v
+ }
+ if obj.RecommendationMarginFraction == nil {
+ v := DefaultRecommendationMarginFraction
+ obj.RecommendationMarginFraction = &v
+ }
+ if obj.UpdaterInterval == nil {
+ v := DefaultUpdaterInterval
+ obj.UpdaterInterval = &v
+ }
+ if obj.RecommenderInterval == nil {
+ v := DefaultRecommenderInterval
+ obj.RecommenderInterval = &v
+ }
+}
+
+// SetDefaults_Worker sets default values for Worker objects.
+func SetDefaults_Worker(obj *Worker) {
+ if obj.MaxSurge == nil {
+ obj.MaxSurge = &DefaultWorkerMaxSurge
+ }
+ if obj.MaxUnavailable == nil {
+ obj.MaxUnavailable = &DefaultWorkerMaxUnavailable
+ }
+ if obj.SystemComponents == nil {
+ obj.SystemComponents = &WorkerSystemComponents{
+ Allow: DefaultWorkerSystemComponentsAllow,
+ }
+ }
+}
+
+// SetDefaults_NginxIngress sets default values for NginxIngress objects.
+func SetDefaults_NginxIngress(obj *NginxIngress) {
+ if obj.ExternalTrafficPolicy == nil {
+ v := corev1.ServiceExternalTrafficPolicyTypeCluster
+ obj.ExternalTrafficPolicy = &v
+ }
+}
+
+// SetDefaults_ControllerResource sets default values for ControllerResource objects.
+func SetDefaults_ControllerResource(obj *ControllerResource) {
+ if obj.Primary == nil {
+ obj.Primary = pointer.BoolPtr(true)
+ }
+}
+
+// SetDefaults_ControllerDeployment sets default values for ControllerDeployment objects.
+func SetDefaults_ControllerDeployment(obj *ControllerDeployment) {
+ p := ControllerDeploymentPolicyOnDemand
+ if obj.Policy == nil {
+ obj.Policy = &p
+ }
+}
+
+// Helper functions
+
+func calculateDefaultNodeCIDRMaskSize(kubelet *KubeletConfig, workers []Worker) *int32 {
+ var maxPods int32 = 110 // default maxPods setting on kubelet
+
+ if kubelet != nil && kubelet.MaxPods != nil {
+ maxPods = *kubelet.MaxPods
+ }
+
+ for _, worker := range workers {
+ if worker.Kubernetes != nil && worker.Kubernetes.Kubelet != nil && worker.Kubernetes.Kubelet.MaxPods != nil && *worker.Kubernetes.Kubelet.MaxPods > maxPods {
+ maxPods = *worker.Kubernetes.Kubelet.MaxPods
+ }
+ }
+
+ // By having approximately twice as many available IP addresses as possible Pods, Kubernetes is able to mitigate IP address reuse as Pods are added to and removed from a node.
+ nodeCidrRange := int32(32 - int(math.Ceil(math.Log2(float64(maxPods*2)))))
+ return &nodeCidrRange
+}
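+
+// Illustrative calculation (not part of the upstream file) with the kubelet default of
+// maxPods = 110:
+//
+//	110 * 2 = 220 addresses  ->  ceil(log2(220)) = 8 bits  ->  mask size = 32 - 8 = 24
+//
+// i.e. each node gets a /24 pod CIDR (256 addresses), roughly twice the possible pod count.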
+
+func addTolerations(tolerations *[]Toleration, additionalTolerations ...Toleration) {
+ existingTolerations := sets.NewString()
+ for _, toleration := range *tolerations {
+ existingTolerations.Insert(utils.IDForKeyWithOptionalValue(toleration.Key, toleration.Value))
+ }
+
+ for _, toleration := range additionalTolerations {
+ if existingTolerations.Has(toleration.Key) {
+ continue
+ }
+ if existingTolerations.Has(utils.IDForKeyWithOptionalValue(toleration.Key, toleration.Value)) {
+ continue
+ }
+ *tolerations = append(*tolerations, toleration)
+ }
+}
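+
+// Illustrative behaviour (not part of the upstream file): with existing tolerations
+// [{Key: SeedTaintProtected}], calling
+//
+//	addTolerations(&tolerations, Toleration{Key: SeedTaintProtected})
+//
+// is a no-op because that key is already present; a toleration is only appended if
+// neither its bare key nor its key/value combination is in the slice yet.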
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/doc.go
new file mode 100644
index 0000000..ac2cfd1
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v1alpha1 is the v1alpha1 version of the API.
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/gardener/gardener/pkg/apis/core
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:protobuf-gen=package
+
+// Package v1alpha1 is a version of the API.
+// +groupName=core.gardener.cloud
+package v1alpha1
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.pb.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.pb.go
new file mode 100644
index 0000000..5ddc5f2
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.pb.go
@@ -0,0 +1,40856 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto
+
+package v1alpha1
+
+import (
+ encoding_binary "encoding/binary"
+ fmt "fmt"
+
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ k8s_io_api_core_v1 "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
+ v13 "k8s.io/api/rbac/v1"
+ resource "k8s.io/apimachinery/pkg/api/resource"
+ v11 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+
+ k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *Addon) Reset() { *m = Addon{} }
+func (*Addon) ProtoMessage() {}
+func (*Addon) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{0}
+}
+func (m *Addon) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Addon) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Addon) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Addon.Merge(m, src)
+}
+func (m *Addon) XXX_Size() int {
+ return m.Size()
+}
+func (m *Addon) XXX_DiscardUnknown() {
+ xxx_messageInfo_Addon.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Addon proto.InternalMessageInfo
+
+func (m *Addons) Reset() { *m = Addons{} }
+func (*Addons) ProtoMessage() {}
+func (*Addons) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{1}
+}
+func (m *Addons) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Addons) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Addons) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Addons.Merge(m, src)
+}
+func (m *Addons) XXX_Size() int {
+ return m.Size()
+}
+func (m *Addons) XXX_DiscardUnknown() {
+ xxx_messageInfo_Addons.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Addons proto.InternalMessageInfo
+
+func (m *AdmissionPlugin) Reset() { *m = AdmissionPlugin{} }
+func (*AdmissionPlugin) ProtoMessage() {}
+func (*AdmissionPlugin) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{2}
+}
+func (m *AdmissionPlugin) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AdmissionPlugin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AdmissionPlugin) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AdmissionPlugin.Merge(m, src)
+}
+func (m *AdmissionPlugin) XXX_Size() int {
+ return m.Size()
+}
+func (m *AdmissionPlugin) XXX_DiscardUnknown() {
+ xxx_messageInfo_AdmissionPlugin.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdmissionPlugin proto.InternalMessageInfo
+
+func (m *Alerting) Reset() { *m = Alerting{} }
+func (*Alerting) ProtoMessage() {}
+func (*Alerting) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{3}
+}
+func (m *Alerting) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Alerting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Alerting) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Alerting.Merge(m, src)
+}
+func (m *Alerting) XXX_Size() int {
+ return m.Size()
+}
+func (m *Alerting) XXX_DiscardUnknown() {
+ xxx_messageInfo_Alerting.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Alerting proto.InternalMessageInfo
+
+func (m *AuditConfig) Reset() { *m = AuditConfig{} }
+func (*AuditConfig) ProtoMessage() {}
+func (*AuditConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{4}
+}
+func (m *AuditConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuditConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AuditConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuditConfig.Merge(m, src)
+}
+func (m *AuditConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuditConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuditConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuditConfig proto.InternalMessageInfo
+
+func (m *AuditPolicy) Reset() { *m = AuditPolicy{} }
+func (*AuditPolicy) ProtoMessage() {}
+func (*AuditPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{5}
+}
+func (m *AuditPolicy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuditPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AuditPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuditPolicy.Merge(m, src)
+}
+func (m *AuditPolicy) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuditPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuditPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuditPolicy proto.InternalMessageInfo
+
+func (m *AvailabilityZone) Reset() { *m = AvailabilityZone{} }
+func (*AvailabilityZone) ProtoMessage() {}
+func (*AvailabilityZone) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{6}
+}
+func (m *AvailabilityZone) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AvailabilityZone) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AvailabilityZone) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AvailabilityZone.Merge(m, src)
+}
+func (m *AvailabilityZone) XXX_Size() int {
+ return m.Size()
+}
+func (m *AvailabilityZone) XXX_DiscardUnknown() {
+ xxx_messageInfo_AvailabilityZone.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AvailabilityZone proto.InternalMessageInfo
+
+func (m *BackupBucket) Reset() { *m = BackupBucket{} }
+func (*BackupBucket) ProtoMessage() {}
+func (*BackupBucket) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{7}
+}
+func (m *BackupBucket) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupBucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupBucket) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupBucket.Merge(m, src)
+}
+func (m *BackupBucket) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupBucket) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupBucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupBucket proto.InternalMessageInfo
+
+func (m *BackupBucketList) Reset() { *m = BackupBucketList{} }
+func (*BackupBucketList) ProtoMessage() {}
+func (*BackupBucketList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{8}
+}
+func (m *BackupBucketList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupBucketList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupBucketList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupBucketList.Merge(m, src)
+}
+func (m *BackupBucketList) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupBucketList) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupBucketList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupBucketList proto.InternalMessageInfo
+
+func (m *BackupBucketProvider) Reset() { *m = BackupBucketProvider{} }
+func (*BackupBucketProvider) ProtoMessage() {}
+func (*BackupBucketProvider) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{9}
+}
+func (m *BackupBucketProvider) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupBucketProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupBucketProvider) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupBucketProvider.Merge(m, src)
+}
+func (m *BackupBucketProvider) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupBucketProvider) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupBucketProvider.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupBucketProvider proto.InternalMessageInfo
+
+func (m *BackupBucketSpec) Reset() { *m = BackupBucketSpec{} }
+func (*BackupBucketSpec) ProtoMessage() {}
+func (*BackupBucketSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{10}
+}
+func (m *BackupBucketSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupBucketSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupBucketSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupBucketSpec.Merge(m, src)
+}
+func (m *BackupBucketSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupBucketSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupBucketSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupBucketSpec proto.InternalMessageInfo
+
+func (m *BackupBucketStatus) Reset() { *m = BackupBucketStatus{} }
+func (*BackupBucketStatus) ProtoMessage() {}
+func (*BackupBucketStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{11}
+}
+func (m *BackupBucketStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupBucketStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupBucketStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupBucketStatus.Merge(m, src)
+}
+func (m *BackupBucketStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupBucketStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupBucketStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupBucketStatus proto.InternalMessageInfo
+
+func (m *BackupEntry) Reset() { *m = BackupEntry{} }
+func (*BackupEntry) ProtoMessage() {}
+func (*BackupEntry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{12}
+}
+func (m *BackupEntry) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupEntry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupEntry.Merge(m, src)
+}
+func (m *BackupEntry) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupEntry) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupEntry proto.InternalMessageInfo
+
+func (m *BackupEntryList) Reset() { *m = BackupEntryList{} }
+func (*BackupEntryList) ProtoMessage() {}
+func (*BackupEntryList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{13}
+}
+func (m *BackupEntryList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupEntryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupEntryList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupEntryList.Merge(m, src)
+}
+func (m *BackupEntryList) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupEntryList) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupEntryList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupEntryList proto.InternalMessageInfo
+
+func (m *BackupEntrySpec) Reset() { *m = BackupEntrySpec{} }
+func (*BackupEntrySpec) ProtoMessage() {}
+func (*BackupEntrySpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{14}
+}
+func (m *BackupEntrySpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupEntrySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupEntrySpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupEntrySpec.Merge(m, src)
+}
+func (m *BackupEntrySpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupEntrySpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupEntrySpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupEntrySpec proto.InternalMessageInfo
+
+func (m *BackupEntryStatus) Reset() { *m = BackupEntryStatus{} }
+func (*BackupEntryStatus) ProtoMessage() {}
+func (*BackupEntryStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{15}
+}
+func (m *BackupEntryStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupEntryStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupEntryStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupEntryStatus.Merge(m, src)
+}
+func (m *BackupEntryStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupEntryStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupEntryStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupEntryStatus proto.InternalMessageInfo
+
+func (m *CRI) Reset() { *m = CRI{} }
+func (*CRI) ProtoMessage() {}
+func (*CRI) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{16}
+}
+func (m *CRI) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CRI) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CRI) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CRI.Merge(m, src)
+}
+func (m *CRI) XXX_Size() int {
+ return m.Size()
+}
+func (m *CRI) XXX_DiscardUnknown() {
+ xxx_messageInfo_CRI.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CRI proto.InternalMessageInfo
+
+func (m *CloudInfo) Reset() { *m = CloudInfo{} }
+func (*CloudInfo) ProtoMessage() {}
+func (*CloudInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{17}
+}
+func (m *CloudInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CloudInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CloudInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudInfo.Merge(m, src)
+}
+func (m *CloudInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *CloudInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudInfo proto.InternalMessageInfo
+
+func (m *CloudProfile) Reset() { *m = CloudProfile{} }
+func (*CloudProfile) ProtoMessage() {}
+func (*CloudProfile) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{18}
+}
+func (m *CloudProfile) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CloudProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CloudProfile) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudProfile.Merge(m, src)
+}
+func (m *CloudProfile) XXX_Size() int {
+ return m.Size()
+}
+func (m *CloudProfile) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudProfile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudProfile proto.InternalMessageInfo
+
+func (m *CloudProfileList) Reset() { *m = CloudProfileList{} }
+func (*CloudProfileList) ProtoMessage() {}
+func (*CloudProfileList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{19}
+}
+func (m *CloudProfileList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CloudProfileList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CloudProfileList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudProfileList.Merge(m, src)
+}
+func (m *CloudProfileList) XXX_Size() int {
+ return m.Size()
+}
+func (m *CloudProfileList) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudProfileList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudProfileList proto.InternalMessageInfo
+
+func (m *CloudProfileSpec) Reset() { *m = CloudProfileSpec{} }
+func (*CloudProfileSpec) ProtoMessage() {}
+func (*CloudProfileSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{20}
+}
+func (m *CloudProfileSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CloudProfileSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CloudProfileSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudProfileSpec.Merge(m, src)
+}
+func (m *CloudProfileSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *CloudProfileSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudProfileSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudProfileSpec proto.InternalMessageInfo
+
+func (m *ClusterAutoscaler) Reset() { *m = ClusterAutoscaler{} }
+func (*ClusterAutoscaler) ProtoMessage() {}
+func (*ClusterAutoscaler) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{21}
+}
+func (m *ClusterAutoscaler) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterAutoscaler) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterAutoscaler.Merge(m, src)
+}
+func (m *ClusterAutoscaler) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterAutoscaler) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterAutoscaler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterAutoscaler proto.InternalMessageInfo
+
+func (m *ClusterInfo) Reset() { *m = ClusterInfo{} }
+func (*ClusterInfo) ProtoMessage() {}
+func (*ClusterInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{22}
+}
+func (m *ClusterInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterInfo.Merge(m, src)
+}
+func (m *ClusterInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterInfo proto.InternalMessageInfo
+
+func (m *Condition) Reset() { *m = Condition{} }
+func (*Condition) ProtoMessage() {}
+func (*Condition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{23}
+}
+func (m *Condition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Condition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Condition.Merge(m, src)
+}
+func (m *Condition) XXX_Size() int {
+ return m.Size()
+}
+func (m *Condition) XXX_DiscardUnknown() {
+ xxx_messageInfo_Condition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Condition proto.InternalMessageInfo
+
+func (m *ContainerRuntime) Reset() { *m = ContainerRuntime{} }
+func (*ContainerRuntime) ProtoMessage() {}
+func (*ContainerRuntime) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{24}
+}
+func (m *ContainerRuntime) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ContainerRuntime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ContainerRuntime) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ContainerRuntime.Merge(m, src)
+}
+func (m *ContainerRuntime) XXX_Size() int {
+ return m.Size()
+}
+func (m *ContainerRuntime) XXX_DiscardUnknown() {
+ xxx_messageInfo_ContainerRuntime.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerRuntime proto.InternalMessageInfo
+
+func (m *ControllerDeployment) Reset() { *m = ControllerDeployment{} }
+func (*ControllerDeployment) ProtoMessage() {}
+func (*ControllerDeployment) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{25}
+}
+func (m *ControllerDeployment) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerDeployment) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerDeployment.Merge(m, src)
+}
+func (m *ControllerDeployment) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerDeployment) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerDeployment.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerDeployment proto.InternalMessageInfo
+
+func (m *ControllerInstallation) Reset() { *m = ControllerInstallation{} }
+func (*ControllerInstallation) ProtoMessage() {}
+func (*ControllerInstallation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{26}
+}
+func (m *ControllerInstallation) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerInstallation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerInstallation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerInstallation.Merge(m, src)
+}
+func (m *ControllerInstallation) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerInstallation) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerInstallation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerInstallation proto.InternalMessageInfo
+
+func (m *ControllerInstallationList) Reset() { *m = ControllerInstallationList{} }
+func (*ControllerInstallationList) ProtoMessage() {}
+func (*ControllerInstallationList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{27}
+}
+func (m *ControllerInstallationList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerInstallationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerInstallationList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerInstallationList.Merge(m, src)
+}
+func (m *ControllerInstallationList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerInstallationList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerInstallationList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerInstallationList proto.InternalMessageInfo
+
+func (m *ControllerInstallationSpec) Reset() { *m = ControllerInstallationSpec{} }
+func (*ControllerInstallationSpec) ProtoMessage() {}
+func (*ControllerInstallationSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{28}
+}
+func (m *ControllerInstallationSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerInstallationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerInstallationSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerInstallationSpec.Merge(m, src)
+}
+func (m *ControllerInstallationSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerInstallationSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerInstallationSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerInstallationSpec proto.InternalMessageInfo
+
+func (m *ControllerInstallationStatus) Reset() { *m = ControllerInstallationStatus{} }
+func (*ControllerInstallationStatus) ProtoMessage() {}
+func (*ControllerInstallationStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{29}
+}
+func (m *ControllerInstallationStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerInstallationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerInstallationStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerInstallationStatus.Merge(m, src)
+}
+func (m *ControllerInstallationStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerInstallationStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerInstallationStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerInstallationStatus proto.InternalMessageInfo
+
+func (m *ControllerRegistration) Reset() { *m = ControllerRegistration{} }
+func (*ControllerRegistration) ProtoMessage() {}
+func (*ControllerRegistration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{30}
+}
+func (m *ControllerRegistration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerRegistration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerRegistration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerRegistration.Merge(m, src)
+}
+func (m *ControllerRegistration) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerRegistration) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerRegistration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerRegistration proto.InternalMessageInfo
+
+func (m *ControllerRegistrationList) Reset() { *m = ControllerRegistrationList{} }
+func (*ControllerRegistrationList) ProtoMessage() {}
+func (*ControllerRegistrationList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{31}
+}
+func (m *ControllerRegistrationList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerRegistrationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerRegistrationList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerRegistrationList.Merge(m, src)
+}
+func (m *ControllerRegistrationList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerRegistrationList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerRegistrationList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerRegistrationList proto.InternalMessageInfo
+
+func (m *ControllerRegistrationSpec) Reset() { *m = ControllerRegistrationSpec{} }
+func (*ControllerRegistrationSpec) ProtoMessage() {}
+func (*ControllerRegistrationSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{32}
+}
+func (m *ControllerRegistrationSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerRegistrationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerRegistrationSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerRegistrationSpec.Merge(m, src)
+}
+func (m *ControllerRegistrationSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerRegistrationSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerRegistrationSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerRegistrationSpec proto.InternalMessageInfo
+
+func (m *ControllerResource) Reset() { *m = ControllerResource{} }
+func (*ControllerResource) ProtoMessage() {}
+func (*ControllerResource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{33}
+}
+func (m *ControllerResource) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerResource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerResource.Merge(m, src)
+}
+func (m *ControllerResource) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerResource) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerResource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerResource proto.InternalMessageInfo
+
+func (m *DNS) Reset() { *m = DNS{} }
+func (*DNS) ProtoMessage() {}
+func (*DNS) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{34}
+}
+func (m *DNS) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DNS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DNS) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DNS.Merge(m, src)
+}
+func (m *DNS) XXX_Size() int {
+ return m.Size()
+}
+func (m *DNS) XXX_DiscardUnknown() {
+ xxx_messageInfo_DNS.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DNS proto.InternalMessageInfo
+
+func (m *DNSIncludeExclude) Reset() { *m = DNSIncludeExclude{} }
+func (*DNSIncludeExclude) ProtoMessage() {}
+func (*DNSIncludeExclude) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{35}
+}
+func (m *DNSIncludeExclude) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DNSIncludeExclude) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DNSIncludeExclude) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DNSIncludeExclude.Merge(m, src)
+}
+func (m *DNSIncludeExclude) XXX_Size() int {
+ return m.Size()
+}
+func (m *DNSIncludeExclude) XXX_DiscardUnknown() {
+ xxx_messageInfo_DNSIncludeExclude.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DNSIncludeExclude proto.InternalMessageInfo
+
+func (m *DNSProvider) Reset() { *m = DNSProvider{} }
+func (*DNSProvider) ProtoMessage() {}
+func (*DNSProvider) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{36}
+}
+func (m *DNSProvider) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DNSProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DNSProvider) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DNSProvider.Merge(m, src)
+}
+func (m *DNSProvider) XXX_Size() int {
+ return m.Size()
+}
+func (m *DNSProvider) XXX_DiscardUnknown() {
+ xxx_messageInfo_DNSProvider.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DNSProvider proto.InternalMessageInfo
+
+func (m *DataVolume) Reset() { *m = DataVolume{} }
+func (*DataVolume) ProtoMessage() {}
+func (*DataVolume) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{37}
+}
+func (m *DataVolume) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DataVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DataVolume) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DataVolume.Merge(m, src)
+}
+func (m *DataVolume) XXX_Size() int {
+ return m.Size()
+}
+func (m *DataVolume) XXX_DiscardUnknown() {
+ xxx_messageInfo_DataVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DataVolume proto.InternalMessageInfo
+
+func (m *Endpoint) Reset() { *m = Endpoint{} }
+func (*Endpoint) ProtoMessage() {}
+func (*Endpoint) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{38}
+}
+func (m *Endpoint) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Endpoint) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Endpoint.Merge(m, src)
+}
+func (m *Endpoint) XXX_Size() int {
+ return m.Size()
+}
+func (m *Endpoint) XXX_DiscardUnknown() {
+ xxx_messageInfo_Endpoint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Endpoint proto.InternalMessageInfo
+
+func (m *ExpirableVersion) Reset() { *m = ExpirableVersion{} }
+func (*ExpirableVersion) ProtoMessage() {}
+func (*ExpirableVersion) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{39}
+}
+func (m *ExpirableVersion) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ExpirableVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ExpirableVersion) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExpirableVersion.Merge(m, src)
+}
+func (m *ExpirableVersion) XXX_Size() int {
+ return m.Size()
+}
+func (m *ExpirableVersion) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExpirableVersion.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExpirableVersion proto.InternalMessageInfo
+
+func (m *Extension) Reset() { *m = Extension{} }
+func (*Extension) ProtoMessage() {}
+func (*Extension) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{40}
+}
+func (m *Extension) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Extension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Extension) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Extension.Merge(m, src)
+}
+func (m *Extension) XXX_Size() int {
+ return m.Size()
+}
+func (m *Extension) XXX_DiscardUnknown() {
+ xxx_messageInfo_Extension.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Extension proto.InternalMessageInfo
+
+func (m *ExtensionResourceState) Reset() { *m = ExtensionResourceState{} }
+func (*ExtensionResourceState) ProtoMessage() {}
+func (*ExtensionResourceState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{41}
+}
+func (m *ExtensionResourceState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ExtensionResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ExtensionResourceState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExtensionResourceState.Merge(m, src)
+}
+func (m *ExtensionResourceState) XXX_Size() int {
+ return m.Size()
+}
+func (m *ExtensionResourceState) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExtensionResourceState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtensionResourceState proto.InternalMessageInfo
+
+func (m *Gardener) Reset() { *m = Gardener{} }
+func (*Gardener) ProtoMessage() {}
+func (*Gardener) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{42}
+}
+func (m *Gardener) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Gardener) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Gardener) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Gardener.Merge(m, src)
+}
+func (m *Gardener) XXX_Size() int {
+ return m.Size()
+}
+func (m *Gardener) XXX_DiscardUnknown() {
+ xxx_messageInfo_Gardener.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Gardener proto.InternalMessageInfo
+
+func (m *GardenerResourceData) Reset() { *m = GardenerResourceData{} }
+func (*GardenerResourceData) ProtoMessage() {}
+func (*GardenerResourceData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{43}
+}
+func (m *GardenerResourceData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GardenerResourceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *GardenerResourceData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GardenerResourceData.Merge(m, src)
+}
+func (m *GardenerResourceData) XXX_Size() int {
+ return m.Size()
+}
+func (m *GardenerResourceData) XXX_DiscardUnknown() {
+ xxx_messageInfo_GardenerResourceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GardenerResourceData proto.InternalMessageInfo
+
+func (m *Hibernation) Reset() { *m = Hibernation{} }
+func (*Hibernation) ProtoMessage() {}
+func (*Hibernation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{44}
+}
+func (m *Hibernation) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Hibernation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Hibernation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Hibernation.Merge(m, src)
+}
+func (m *Hibernation) XXX_Size() int {
+ return m.Size()
+}
+func (m *Hibernation) XXX_DiscardUnknown() {
+ xxx_messageInfo_Hibernation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Hibernation proto.InternalMessageInfo
+
+func (m *HibernationSchedule) Reset() { *m = HibernationSchedule{} }
+func (*HibernationSchedule) ProtoMessage() {}
+func (*HibernationSchedule) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{45}
+}
+func (m *HibernationSchedule) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HibernationSchedule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *HibernationSchedule) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HibernationSchedule.Merge(m, src)
+}
+func (m *HibernationSchedule) XXX_Size() int {
+ return m.Size()
+}
+func (m *HibernationSchedule) XXX_DiscardUnknown() {
+ xxx_messageInfo_HibernationSchedule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HibernationSchedule proto.InternalMessageInfo
+
+func (m *HorizontalPodAutoscalerConfig) Reset() { *m = HorizontalPodAutoscalerConfig{} }
+func (*HorizontalPodAutoscalerConfig) ProtoMessage() {}
+func (*HorizontalPodAutoscalerConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{46}
+}
+func (m *HorizontalPodAutoscalerConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HorizontalPodAutoscalerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *HorizontalPodAutoscalerConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HorizontalPodAutoscalerConfig.Merge(m, src)
+}
+func (m *HorizontalPodAutoscalerConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *HorizontalPodAutoscalerConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_HorizontalPodAutoscalerConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HorizontalPodAutoscalerConfig proto.InternalMessageInfo
+
+func (m *Ingress) Reset() { *m = Ingress{} }
+func (*Ingress) ProtoMessage() {}
+func (*Ingress) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{47}
+}
+func (m *Ingress) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Ingress) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Ingress.Merge(m, src)
+}
+func (m *Ingress) XXX_Size() int {
+ return m.Size()
+}
+func (m *Ingress) XXX_DiscardUnknown() {
+ xxx_messageInfo_Ingress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Ingress proto.InternalMessageInfo
+
+func (m *IngressController) Reset() { *m = IngressController{} }
+func (*IngressController) ProtoMessage() {}
+func (*IngressController) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{48}
+}
+func (m *IngressController) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressController) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressController) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressController.Merge(m, src)
+}
+func (m *IngressController) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressController) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressController.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressController proto.InternalMessageInfo
+
+func (m *KubeAPIServerConfig) Reset() { *m = KubeAPIServerConfig{} }
+func (*KubeAPIServerConfig) ProtoMessage() {}
+func (*KubeAPIServerConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{49}
+}
+func (m *KubeAPIServerConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeAPIServerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeAPIServerConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeAPIServerConfig.Merge(m, src)
+}
+func (m *KubeAPIServerConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeAPIServerConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeAPIServerConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeAPIServerConfig proto.InternalMessageInfo
+
+func (m *KubeAPIServerRequests) Reset() { *m = KubeAPIServerRequests{} }
+func (*KubeAPIServerRequests) ProtoMessage() {}
+func (*KubeAPIServerRequests) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{50}
+}
+func (m *KubeAPIServerRequests) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeAPIServerRequests) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeAPIServerRequests) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeAPIServerRequests.Merge(m, src)
+}
+func (m *KubeAPIServerRequests) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeAPIServerRequests) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeAPIServerRequests.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeAPIServerRequests proto.InternalMessageInfo
+
+func (m *KubeControllerManagerConfig) Reset() { *m = KubeControllerManagerConfig{} }
+func (*KubeControllerManagerConfig) ProtoMessage() {}
+func (*KubeControllerManagerConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{51}
+}
+func (m *KubeControllerManagerConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeControllerManagerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeControllerManagerConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeControllerManagerConfig.Merge(m, src)
+}
+func (m *KubeControllerManagerConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeControllerManagerConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeControllerManagerConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeControllerManagerConfig proto.InternalMessageInfo
+
+func (m *KubeProxyConfig) Reset() { *m = KubeProxyConfig{} }
+func (*KubeProxyConfig) ProtoMessage() {}
+func (*KubeProxyConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{52}
+}
+func (m *KubeProxyConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeProxyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeProxyConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeProxyConfig.Merge(m, src)
+}
+func (m *KubeProxyConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeProxyConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeProxyConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeProxyConfig proto.InternalMessageInfo
+
+func (m *KubeSchedulerConfig) Reset() { *m = KubeSchedulerConfig{} }
+func (*KubeSchedulerConfig) ProtoMessage() {}
+func (*KubeSchedulerConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{53}
+}
+func (m *KubeSchedulerConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeSchedulerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeSchedulerConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeSchedulerConfig.Merge(m, src)
+}
+func (m *KubeSchedulerConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeSchedulerConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeSchedulerConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeSchedulerConfig proto.InternalMessageInfo
+
+func (m *KubeletConfig) Reset() { *m = KubeletConfig{} }
+func (*KubeletConfig) ProtoMessage() {}
+func (*KubeletConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{54}
+}
+func (m *KubeletConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeletConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeletConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeletConfig.Merge(m, src)
+}
+func (m *KubeletConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeletConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeletConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeletConfig proto.InternalMessageInfo
+
+func (m *KubeletConfigEviction) Reset() { *m = KubeletConfigEviction{} }
+func (*KubeletConfigEviction) ProtoMessage() {}
+func (*KubeletConfigEviction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{55}
+}
+func (m *KubeletConfigEviction) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeletConfigEviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeletConfigEviction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeletConfigEviction.Merge(m, src)
+}
+func (m *KubeletConfigEviction) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeletConfigEviction) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeletConfigEviction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeletConfigEviction proto.InternalMessageInfo
+
+func (m *KubeletConfigEvictionMinimumReclaim) Reset() { *m = KubeletConfigEvictionMinimumReclaim{} }
+func (*KubeletConfigEvictionMinimumReclaim) ProtoMessage() {}
+func (*KubeletConfigEvictionMinimumReclaim) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{56}
+}
+func (m *KubeletConfigEvictionMinimumReclaim) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeletConfigEvictionMinimumReclaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeletConfigEvictionMinimumReclaim) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeletConfigEvictionMinimumReclaim.Merge(m, src)
+}
+func (m *KubeletConfigEvictionMinimumReclaim) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeletConfigEvictionMinimumReclaim) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeletConfigEvictionMinimumReclaim.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeletConfigEvictionMinimumReclaim proto.InternalMessageInfo
+
+func (m *KubeletConfigEvictionSoftGracePeriod) Reset() { *m = KubeletConfigEvictionSoftGracePeriod{} }
+func (*KubeletConfigEvictionSoftGracePeriod) ProtoMessage() {}
+func (*KubeletConfigEvictionSoftGracePeriod) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{57}
+}
+func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeletConfigEvictionSoftGracePeriod.Merge(m, src)
+}
+func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeletConfigEvictionSoftGracePeriod) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeletConfigEvictionSoftGracePeriod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeletConfigEvictionSoftGracePeriod proto.InternalMessageInfo
+
+func (m *KubeletConfigReserved) Reset() { *m = KubeletConfigReserved{} }
+func (*KubeletConfigReserved) ProtoMessage() {}
+func (*KubeletConfigReserved) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{58}
+}
+func (m *KubeletConfigReserved) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeletConfigReserved) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeletConfigReserved) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeletConfigReserved.Merge(m, src)
+}
+func (m *KubeletConfigReserved) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeletConfigReserved) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeletConfigReserved.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeletConfigReserved proto.InternalMessageInfo
+
+func (m *Kubernetes) Reset() { *m = Kubernetes{} }
+func (*Kubernetes) ProtoMessage() {}
+func (*Kubernetes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{59}
+}
+func (m *Kubernetes) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Kubernetes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Kubernetes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Kubernetes.Merge(m, src)
+}
+func (m *Kubernetes) XXX_Size() int {
+ return m.Size()
+}
+func (m *Kubernetes) XXX_DiscardUnknown() {
+ xxx_messageInfo_Kubernetes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Kubernetes proto.InternalMessageInfo
+
+func (m *KubernetesConfig) Reset() { *m = KubernetesConfig{} }
+func (*KubernetesConfig) ProtoMessage() {}
+func (*KubernetesConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{60}
+}
+func (m *KubernetesConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubernetesConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubernetesConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubernetesConfig.Merge(m, src)
+}
+func (m *KubernetesConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubernetesConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubernetesConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubernetesConfig proto.InternalMessageInfo
+
+func (m *KubernetesDashboard) Reset() { *m = KubernetesDashboard{} }
+func (*KubernetesDashboard) ProtoMessage() {}
+func (*KubernetesDashboard) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{61}
+}
+func (m *KubernetesDashboard) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubernetesDashboard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubernetesDashboard) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubernetesDashboard.Merge(m, src)
+}
+func (m *KubernetesDashboard) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubernetesDashboard) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubernetesDashboard.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubernetesDashboard proto.InternalMessageInfo
+
+func (m *KubernetesInfo) Reset() { *m = KubernetesInfo{} }
+func (*KubernetesInfo) ProtoMessage() {}
+func (*KubernetesInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{62}
+}
+func (m *KubernetesInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubernetesInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubernetesInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubernetesInfo.Merge(m, src)
+}
+func (m *KubernetesInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubernetesInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubernetesInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubernetesInfo proto.InternalMessageInfo
+
+func (m *KubernetesSettings) Reset() { *m = KubernetesSettings{} }
+func (*KubernetesSettings) ProtoMessage() {}
+func (*KubernetesSettings) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{63}
+}
+func (m *KubernetesSettings) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubernetesSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubernetesSettings) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubernetesSettings.Merge(m, src)
+}
+func (m *KubernetesSettings) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubernetesSettings) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubernetesSettings.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubernetesSettings proto.InternalMessageInfo
+
+func (m *LastError) Reset() { *m = LastError{} }
+func (*LastError) ProtoMessage() {}
+func (*LastError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{64}
+}
+func (m *LastError) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LastError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *LastError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LastError.Merge(m, src)
+}
+func (m *LastError) XXX_Size() int {
+ return m.Size()
+}
+func (m *LastError) XXX_DiscardUnknown() {
+ xxx_messageInfo_LastError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LastError proto.InternalMessageInfo
+
+func (m *LastOperation) Reset() { *m = LastOperation{} }
+func (*LastOperation) ProtoMessage() {}
+func (*LastOperation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{65}
+}
+func (m *LastOperation) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LastOperation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *LastOperation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LastOperation.Merge(m, src)
+}
+func (m *LastOperation) XXX_Size() int {
+ return m.Size()
+}
+func (m *LastOperation) XXX_DiscardUnknown() {
+ xxx_messageInfo_LastOperation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LastOperation proto.InternalMessageInfo
+
+func (m *Machine) Reset() { *m = Machine{} }
+func (*Machine) ProtoMessage() {}
+func (*Machine) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{66}
+}
+func (m *Machine) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Machine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Machine) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Machine.Merge(m, src)
+}
+func (m *Machine) XXX_Size() int {
+ return m.Size()
+}
+func (m *Machine) XXX_DiscardUnknown() {
+ xxx_messageInfo_Machine.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Machine proto.InternalMessageInfo
+
+func (m *MachineControllerManagerSettings) Reset() { *m = MachineControllerManagerSettings{} }
+func (*MachineControllerManagerSettings) ProtoMessage() {}
+func (*MachineControllerManagerSettings) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{67}
+}
+func (m *MachineControllerManagerSettings) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MachineControllerManagerSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MachineControllerManagerSettings) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MachineControllerManagerSettings.Merge(m, src)
+}
+func (m *MachineControllerManagerSettings) XXX_Size() int {
+ return m.Size()
+}
+func (m *MachineControllerManagerSettings) XXX_DiscardUnknown() {
+ xxx_messageInfo_MachineControllerManagerSettings.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MachineControllerManagerSettings proto.InternalMessageInfo
+
+func (m *MachineImage) Reset() { *m = MachineImage{} }
+func (*MachineImage) ProtoMessage() {}
+func (*MachineImage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{68}
+}
+func (m *MachineImage) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MachineImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MachineImage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MachineImage.Merge(m, src)
+}
+func (m *MachineImage) XXX_Size() int {
+ return m.Size()
+}
+func (m *MachineImage) XXX_DiscardUnknown() {
+ xxx_messageInfo_MachineImage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MachineImage proto.InternalMessageInfo
+
+func (m *MachineImageVersion) Reset() { *m = MachineImageVersion{} }
+func (*MachineImageVersion) ProtoMessage() {}
+func (*MachineImageVersion) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{69}
+}
+func (m *MachineImageVersion) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MachineImageVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MachineImageVersion) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MachineImageVersion.Merge(m, src)
+}
+func (m *MachineImageVersion) XXX_Size() int {
+ return m.Size()
+}
+func (m *MachineImageVersion) XXX_DiscardUnknown() {
+ xxx_messageInfo_MachineImageVersion.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MachineImageVersion proto.InternalMessageInfo
+
+func (m *MachineType) Reset() { *m = MachineType{} }
+func (*MachineType) ProtoMessage() {}
+func (*MachineType) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{70}
+}
+func (m *MachineType) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MachineType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MachineType) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MachineType.Merge(m, src)
+}
+func (m *MachineType) XXX_Size() int {
+ return m.Size()
+}
+func (m *MachineType) XXX_DiscardUnknown() {
+ xxx_messageInfo_MachineType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MachineType proto.InternalMessageInfo
+
+func (m *MachineTypeStorage) Reset() { *m = MachineTypeStorage{} }
+func (*MachineTypeStorage) ProtoMessage() {}
+func (*MachineTypeStorage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{71}
+}
+func (m *MachineTypeStorage) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MachineTypeStorage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MachineTypeStorage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MachineTypeStorage.Merge(m, src)
+}
+func (m *MachineTypeStorage) XXX_Size() int {
+ return m.Size()
+}
+func (m *MachineTypeStorage) XXX_DiscardUnknown() {
+ xxx_messageInfo_MachineTypeStorage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MachineTypeStorage proto.InternalMessageInfo
+
+func (m *Maintenance) Reset() { *m = Maintenance{} }
+func (*Maintenance) ProtoMessage() {}
+func (*Maintenance) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{72}
+}
+func (m *Maintenance) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Maintenance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Maintenance) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Maintenance.Merge(m, src)
+}
+func (m *Maintenance) XXX_Size() int {
+ return m.Size()
+}
+func (m *Maintenance) XXX_DiscardUnknown() {
+ xxx_messageInfo_Maintenance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Maintenance proto.InternalMessageInfo
+
+func (m *MaintenanceAutoUpdate) Reset() { *m = MaintenanceAutoUpdate{} }
+func (*MaintenanceAutoUpdate) ProtoMessage() {}
+func (*MaintenanceAutoUpdate) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{73}
+}
+func (m *MaintenanceAutoUpdate) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MaintenanceAutoUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MaintenanceAutoUpdate) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MaintenanceAutoUpdate.Merge(m, src)
+}
+func (m *MaintenanceAutoUpdate) XXX_Size() int {
+ return m.Size()
+}
+func (m *MaintenanceAutoUpdate) XXX_DiscardUnknown() {
+ xxx_messageInfo_MaintenanceAutoUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MaintenanceAutoUpdate proto.InternalMessageInfo
+
+func (m *MaintenanceTimeWindow) Reset() { *m = MaintenanceTimeWindow{} }
+func (*MaintenanceTimeWindow) ProtoMessage() {}
+func (*MaintenanceTimeWindow) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{74}
+}
+func (m *MaintenanceTimeWindow) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MaintenanceTimeWindow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MaintenanceTimeWindow) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MaintenanceTimeWindow.Merge(m, src)
+}
+func (m *MaintenanceTimeWindow) XXX_Size() int {
+ return m.Size()
+}
+func (m *MaintenanceTimeWindow) XXX_DiscardUnknown() {
+ xxx_messageInfo_MaintenanceTimeWindow.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MaintenanceTimeWindow proto.InternalMessageInfo
+
+func (m *Monitoring) Reset() { *m = Monitoring{} }
+func (*Monitoring) ProtoMessage() {}
+func (*Monitoring) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{75}
+}
+func (m *Monitoring) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Monitoring) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Monitoring) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Monitoring.Merge(m, src)
+}
+func (m *Monitoring) XXX_Size() int {
+ return m.Size()
+}
+func (m *Monitoring) XXX_DiscardUnknown() {
+ xxx_messageInfo_Monitoring.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Monitoring proto.InternalMessageInfo
+
+func (m *NamedResourceReference) Reset() { *m = NamedResourceReference{} }
+func (*NamedResourceReference) ProtoMessage() {}
+func (*NamedResourceReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{76}
+}
+func (m *NamedResourceReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NamedResourceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NamedResourceReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedResourceReference.Merge(m, src)
+}
+func (m *NamedResourceReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *NamedResourceReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedResourceReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedResourceReference proto.InternalMessageInfo
+
+func (m *Networking) Reset() { *m = Networking{} }
+func (*Networking) ProtoMessage() {}
+func (*Networking) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{77}
+}
+func (m *Networking) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Networking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Networking) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Networking.Merge(m, src)
+}
+func (m *Networking) XXX_Size() int {
+ return m.Size()
+}
+func (m *Networking) XXX_DiscardUnknown() {
+ xxx_messageInfo_Networking.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Networking proto.InternalMessageInfo
+
+func (m *NginxIngress) Reset() { *m = NginxIngress{} }
+func (*NginxIngress) ProtoMessage() {}
+func (*NginxIngress) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{78}
+}
+func (m *NginxIngress) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NginxIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NginxIngress) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NginxIngress.Merge(m, src)
+}
+func (m *NginxIngress) XXX_Size() int {
+ return m.Size()
+}
+func (m *NginxIngress) XXX_DiscardUnknown() {
+ xxx_messageInfo_NginxIngress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NginxIngress proto.InternalMessageInfo
+
+func (m *OIDCConfig) Reset() { *m = OIDCConfig{} }
+func (*OIDCConfig) ProtoMessage() {}
+func (*OIDCConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{79}
+}
+func (m *OIDCConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OIDCConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OIDCConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OIDCConfig.Merge(m, src)
+}
+func (m *OIDCConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *OIDCConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_OIDCConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OIDCConfig proto.InternalMessageInfo
+
+func (m *OpenIDConnectClientAuthentication) Reset() { *m = OpenIDConnectClientAuthentication{} }
+func (*OpenIDConnectClientAuthentication) ProtoMessage() {}
+func (*OpenIDConnectClientAuthentication) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{80}
+}
+func (m *OpenIDConnectClientAuthentication) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OpenIDConnectClientAuthentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OpenIDConnectClientAuthentication) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OpenIDConnectClientAuthentication.Merge(m, src)
+}
+func (m *OpenIDConnectClientAuthentication) XXX_Size() int {
+ return m.Size()
+}
+func (m *OpenIDConnectClientAuthentication) XXX_DiscardUnknown() {
+ xxx_messageInfo_OpenIDConnectClientAuthentication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OpenIDConnectClientAuthentication proto.InternalMessageInfo
+
+func (m *Plant) Reset() { *m = Plant{} }
+func (*Plant) ProtoMessage() {}
+func (*Plant) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{81}
+}
+func (m *Plant) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Plant) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Plant) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Plant.Merge(m, src)
+}
+func (m *Plant) XXX_Size() int {
+ return m.Size()
+}
+func (m *Plant) XXX_DiscardUnknown() {
+ xxx_messageInfo_Plant.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Plant proto.InternalMessageInfo
+
+func (m *PlantList) Reset() { *m = PlantList{} }
+func (*PlantList) ProtoMessage() {}
+func (*PlantList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{82}
+}
+func (m *PlantList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PlantList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PlantList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PlantList.Merge(m, src)
+}
+func (m *PlantList) XXX_Size() int {
+ return m.Size()
+}
+func (m *PlantList) XXX_DiscardUnknown() {
+ xxx_messageInfo_PlantList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PlantList proto.InternalMessageInfo
+
+func (m *PlantSpec) Reset() { *m = PlantSpec{} }
+func (*PlantSpec) ProtoMessage() {}
+func (*PlantSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{83}
+}
+func (m *PlantSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PlantSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PlantSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PlantSpec.Merge(m, src)
+}
+func (m *PlantSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *PlantSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_PlantSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PlantSpec proto.InternalMessageInfo
+
+func (m *PlantStatus) Reset() { *m = PlantStatus{} }
+func (*PlantStatus) ProtoMessage() {}
+func (*PlantStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{84}
+}
+func (m *PlantStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PlantStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PlantStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PlantStatus.Merge(m, src)
+}
+func (m *PlantStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *PlantStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_PlantStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PlantStatus proto.InternalMessageInfo
+
+func (m *Project) Reset() { *m = Project{} }
+func (*Project) ProtoMessage() {}
+func (*Project) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{85}
+}
+func (m *Project) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Project) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Project.Merge(m, src)
+}
+func (m *Project) XXX_Size() int {
+ return m.Size()
+}
+func (m *Project) XXX_DiscardUnknown() {
+ xxx_messageInfo_Project.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Project proto.InternalMessageInfo
+
+func (m *ProjectList) Reset() { *m = ProjectList{} }
+func (*ProjectList) ProtoMessage() {}
+func (*ProjectList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{86}
+}
+func (m *ProjectList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectList.Merge(m, src)
+}
+func (m *ProjectList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectList proto.InternalMessageInfo
+
+func (m *ProjectMember) Reset() { *m = ProjectMember{} }
+func (*ProjectMember) ProtoMessage() {}
+func (*ProjectMember) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{87}
+}
+func (m *ProjectMember) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectMember) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectMember.Merge(m, src)
+}
+func (m *ProjectMember) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectMember) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectMember.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectMember proto.InternalMessageInfo
+
+func (m *ProjectSpec) Reset() { *m = ProjectSpec{} }
+func (*ProjectSpec) ProtoMessage() {}
+func (*ProjectSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{88}
+}
+func (m *ProjectSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectSpec.Merge(m, src)
+}
+func (m *ProjectSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectSpec proto.InternalMessageInfo
+
+func (m *ProjectStatus) Reset() { *m = ProjectStatus{} }
+func (*ProjectStatus) ProtoMessage() {}
+func (*ProjectStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{89}
+}
+func (m *ProjectStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectStatus.Merge(m, src)
+}
+func (m *ProjectStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectStatus proto.InternalMessageInfo
+
+func (m *ProjectTolerations) Reset() { *m = ProjectTolerations{} }
+func (*ProjectTolerations) ProtoMessage() {}
+func (*ProjectTolerations) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{90}
+}
+func (m *ProjectTolerations) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectTolerations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectTolerations) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectTolerations.Merge(m, src)
+}
+func (m *ProjectTolerations) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectTolerations) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectTolerations.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectTolerations proto.InternalMessageInfo
+
+func (m *Provider) Reset() { *m = Provider{} }
+func (*Provider) ProtoMessage() {}
+func (*Provider) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{91}
+}
+func (m *Provider) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Provider) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Provider.Merge(m, src)
+}
+func (m *Provider) XXX_Size() int {
+ return m.Size()
+}
+func (m *Provider) XXX_DiscardUnknown() {
+ xxx_messageInfo_Provider.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Provider proto.InternalMessageInfo
+
+func (m *Quota) Reset() { *m = Quota{} }
+func (*Quota) ProtoMessage() {}
+func (*Quota) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{92}
+}
+func (m *Quota) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Quota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Quota) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Quota.Merge(m, src)
+}
+func (m *Quota) XXX_Size() int {
+ return m.Size()
+}
+func (m *Quota) XXX_DiscardUnknown() {
+ xxx_messageInfo_Quota.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Quota proto.InternalMessageInfo
+
+func (m *QuotaList) Reset() { *m = QuotaList{} }
+func (*QuotaList) ProtoMessage() {}
+func (*QuotaList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{93}
+}
+func (m *QuotaList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *QuotaList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QuotaList.Merge(m, src)
+}
+func (m *QuotaList) XXX_Size() int {
+ return m.Size()
+}
+func (m *QuotaList) XXX_DiscardUnknown() {
+ xxx_messageInfo_QuotaList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QuotaList proto.InternalMessageInfo
+
+func (m *QuotaSpec) Reset() { *m = QuotaSpec{} }
+func (*QuotaSpec) ProtoMessage() {}
+func (*QuotaSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{94}
+}
+func (m *QuotaSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *QuotaSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QuotaSpec.Merge(m, src)
+}
+func (m *QuotaSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *QuotaSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_QuotaSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QuotaSpec proto.InternalMessageInfo
+
+func (m *Region) Reset() { *m = Region{} }
+func (*Region) ProtoMessage() {}
+func (*Region) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{95}
+}
+func (m *Region) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Region) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Region) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Region.Merge(m, src)
+}
+func (m *Region) XXX_Size() int {
+ return m.Size()
+}
+func (m *Region) XXX_DiscardUnknown() {
+ xxx_messageInfo_Region.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Region proto.InternalMessageInfo
+
+func (m *ResourceData) Reset() { *m = ResourceData{} }
+func (*ResourceData) ProtoMessage() {}
+func (*ResourceData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{96}
+}
+func (m *ResourceData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceData.Merge(m, src)
+}
+func (m *ResourceData) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceData) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceData proto.InternalMessageInfo
+
+func (m *ResourceWatchCacheSize) Reset() { *m = ResourceWatchCacheSize{} }
+func (*ResourceWatchCacheSize) ProtoMessage() {}
+func (*ResourceWatchCacheSize) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{97}
+}
+func (m *ResourceWatchCacheSize) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceWatchCacheSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceWatchCacheSize) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceWatchCacheSize.Merge(m, src)
+}
+func (m *ResourceWatchCacheSize) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceWatchCacheSize) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceWatchCacheSize.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceWatchCacheSize proto.InternalMessageInfo
+
+func (m *SecretBinding) Reset() { *m = SecretBinding{} }
+func (*SecretBinding) ProtoMessage() {}
+func (*SecretBinding) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{98}
+}
+func (m *SecretBinding) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SecretBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SecretBinding) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SecretBinding.Merge(m, src)
+}
+func (m *SecretBinding) XXX_Size() int {
+ return m.Size()
+}
+func (m *SecretBinding) XXX_DiscardUnknown() {
+ xxx_messageInfo_SecretBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretBinding proto.InternalMessageInfo
+
+func (m *SecretBindingList) Reset() { *m = SecretBindingList{} }
+func (*SecretBindingList) ProtoMessage() {}
+func (*SecretBindingList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{99}
+}
+func (m *SecretBindingList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SecretBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SecretBindingList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SecretBindingList.Merge(m, src)
+}
+func (m *SecretBindingList) XXX_Size() int {
+ return m.Size()
+}
+func (m *SecretBindingList) XXX_DiscardUnknown() {
+ xxx_messageInfo_SecretBindingList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretBindingList proto.InternalMessageInfo
+
+func (m *Seed) Reset() { *m = Seed{} }
+func (*Seed) ProtoMessage() {}
+func (*Seed) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{100}
+}
+func (m *Seed) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Seed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Seed) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Seed.Merge(m, src)
+}
+func (m *Seed) XXX_Size() int {
+ return m.Size()
+}
+func (m *Seed) XXX_DiscardUnknown() {
+ xxx_messageInfo_Seed.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Seed proto.InternalMessageInfo
+
+func (m *SeedBackup) Reset() { *m = SeedBackup{} }
+func (*SeedBackup) ProtoMessage() {}
+func (*SeedBackup) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{101}
+}
+func (m *SeedBackup) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedBackup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedBackup) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedBackup.Merge(m, src)
+}
+func (m *SeedBackup) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedBackup) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedBackup.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedBackup proto.InternalMessageInfo
+
+func (m *SeedDNS) Reset() { *m = SeedDNS{} }
+func (*SeedDNS) ProtoMessage() {}
+func (*SeedDNS) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{102}
+}
+func (m *SeedDNS) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedDNS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedDNS) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedDNS.Merge(m, src)
+}
+func (m *SeedDNS) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedDNS) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedDNS.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedDNS proto.InternalMessageInfo
+
+func (m *SeedDNSProvider) Reset() { *m = SeedDNSProvider{} }
+func (*SeedDNSProvider) ProtoMessage() {}
+func (*SeedDNSProvider) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{103}
+}
+func (m *SeedDNSProvider) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedDNSProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedDNSProvider) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedDNSProvider.Merge(m, src)
+}
+func (m *SeedDNSProvider) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedDNSProvider) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedDNSProvider.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedDNSProvider proto.InternalMessageInfo
+
+func (m *SeedList) Reset() { *m = SeedList{} }
+func (*SeedList) ProtoMessage() {}
+func (*SeedList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{104}
+}
+func (m *SeedList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedList.Merge(m, src)
+}
+func (m *SeedList) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedList) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedList proto.InternalMessageInfo
+
+func (m *SeedNetworks) Reset() { *m = SeedNetworks{} }
+func (*SeedNetworks) ProtoMessage() {}
+func (*SeedNetworks) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{105}
+}
+func (m *SeedNetworks) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedNetworks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedNetworks) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedNetworks.Merge(m, src)
+}
+func (m *SeedNetworks) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedNetworks) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedNetworks.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedNetworks proto.InternalMessageInfo
+
+func (m *SeedProvider) Reset() { *m = SeedProvider{} }
+func (*SeedProvider) ProtoMessage() {}
+func (*SeedProvider) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{106}
+}
+func (m *SeedProvider) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedProvider) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedProvider.Merge(m, src)
+}
+func (m *SeedProvider) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedProvider) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedProvider.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedProvider proto.InternalMessageInfo
+
+func (m *SeedSelector) Reset() { *m = SeedSelector{} }
+func (*SeedSelector) ProtoMessage() {}
+func (*SeedSelector) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{107}
+}
+func (m *SeedSelector) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSelector) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSelector.Merge(m, src)
+}
+func (m *SeedSelector) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSelector) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSelector proto.InternalMessageInfo
+
+func (m *SeedSettingExcessCapacityReservation) Reset() { *m = SeedSettingExcessCapacityReservation{} }
+func (*SeedSettingExcessCapacityReservation) ProtoMessage() {}
+func (*SeedSettingExcessCapacityReservation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{108}
+}
+func (m *SeedSettingExcessCapacityReservation) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSettingExcessCapacityReservation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSettingExcessCapacityReservation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSettingExcessCapacityReservation.Merge(m, src)
+}
+func (m *SeedSettingExcessCapacityReservation) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSettingExcessCapacityReservation) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSettingExcessCapacityReservation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSettingExcessCapacityReservation proto.InternalMessageInfo
+
+func (m *SeedSettingLoadBalancerServices) Reset() { *m = SeedSettingLoadBalancerServices{} }
+func (*SeedSettingLoadBalancerServices) ProtoMessage() {}
+func (*SeedSettingLoadBalancerServices) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{109}
+}
+func (m *SeedSettingLoadBalancerServices) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSettingLoadBalancerServices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSettingLoadBalancerServices) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSettingLoadBalancerServices.Merge(m, src)
+}
+func (m *SeedSettingLoadBalancerServices) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSettingLoadBalancerServices) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSettingLoadBalancerServices.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSettingLoadBalancerServices proto.InternalMessageInfo
+
+func (m *SeedSettingScheduling) Reset() { *m = SeedSettingScheduling{} }
+func (*SeedSettingScheduling) ProtoMessage() {}
+func (*SeedSettingScheduling) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{110}
+}
+func (m *SeedSettingScheduling) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSettingScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSettingScheduling) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSettingScheduling.Merge(m, src)
+}
+func (m *SeedSettingScheduling) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSettingScheduling) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSettingScheduling.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSettingScheduling proto.InternalMessageInfo
+
+func (m *SeedSettingShootDNS) Reset() { *m = SeedSettingShootDNS{} }
+func (*SeedSettingShootDNS) ProtoMessage() {}
+func (*SeedSettingShootDNS) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{111}
+}
+func (m *SeedSettingShootDNS) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSettingShootDNS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSettingShootDNS) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSettingShootDNS.Merge(m, src)
+}
+func (m *SeedSettingShootDNS) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSettingShootDNS) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSettingShootDNS.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSettingShootDNS proto.InternalMessageInfo
+
+func (m *SeedSettingVerticalPodAutoscaler) Reset() { *m = SeedSettingVerticalPodAutoscaler{} }
+func (*SeedSettingVerticalPodAutoscaler) ProtoMessage() {}
+func (*SeedSettingVerticalPodAutoscaler) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{112}
+}
+func (m *SeedSettingVerticalPodAutoscaler) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSettingVerticalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSettingVerticalPodAutoscaler) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSettingVerticalPodAutoscaler.Merge(m, src)
+}
+func (m *SeedSettingVerticalPodAutoscaler) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSettingVerticalPodAutoscaler) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSettingVerticalPodAutoscaler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSettingVerticalPodAutoscaler proto.InternalMessageInfo
+
+func (m *SeedSettings) Reset() { *m = SeedSettings{} }
+func (*SeedSettings) ProtoMessage() {}
+func (*SeedSettings) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{113}
+}
+func (m *SeedSettings) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSettings) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSettings.Merge(m, src)
+}
+func (m *SeedSettings) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSettings) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSettings.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSettings proto.InternalMessageInfo
+
+func (m *SeedSpec) Reset() { *m = SeedSpec{} }
+func (*SeedSpec) ProtoMessage() {}
+func (*SeedSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{114}
+}
+func (m *SeedSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSpec.Merge(m, src)
+}
+func (m *SeedSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSpec proto.InternalMessageInfo
+
+func (m *SeedStatus) Reset() { *m = SeedStatus{} }
+func (*SeedStatus) ProtoMessage() {}
+func (*SeedStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{115}
+}
+func (m *SeedStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedStatus.Merge(m, src)
+}
+func (m *SeedStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedStatus proto.InternalMessageInfo
+
+func (m *SeedTaint) Reset() { *m = SeedTaint{} }
+func (*SeedTaint) ProtoMessage() {}
+func (*SeedTaint) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{116}
+}
+func (m *SeedTaint) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedTaint) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedTaint.Merge(m, src)
+}
+func (m *SeedTaint) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedTaint) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedTaint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedTaint proto.InternalMessageInfo
+
+func (m *SeedVolume) Reset() { *m = SeedVolume{} }
+func (*SeedVolume) ProtoMessage() {}
+func (*SeedVolume) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{117}
+}
+func (m *SeedVolume) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedVolume) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedVolume.Merge(m, src)
+}
+func (m *SeedVolume) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedVolume) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedVolume proto.InternalMessageInfo
+
+func (m *SeedVolumeProvider) Reset() { *m = SeedVolumeProvider{} }
+func (*SeedVolumeProvider) ProtoMessage() {}
+func (*SeedVolumeProvider) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{118}
+}
+func (m *SeedVolumeProvider) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedVolumeProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedVolumeProvider) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedVolumeProvider.Merge(m, src)
+}
+func (m *SeedVolumeProvider) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedVolumeProvider) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedVolumeProvider.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedVolumeProvider proto.InternalMessageInfo
+
+func (m *ServiceAccountConfig) Reset() { *m = ServiceAccountConfig{} }
+func (*ServiceAccountConfig) ProtoMessage() {}
+func (*ServiceAccountConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{119}
+}
+func (m *ServiceAccountConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ServiceAccountConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ServiceAccountConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceAccountConfig.Merge(m, src)
+}
+func (m *ServiceAccountConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *ServiceAccountConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceAccountConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceAccountConfig proto.InternalMessageInfo
+
+func (m *Shoot) Reset() { *m = Shoot{} }
+func (*Shoot) ProtoMessage() {}
+func (*Shoot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{120}
+}
+func (m *Shoot) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Shoot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Shoot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Shoot.Merge(m, src)
+}
+func (m *Shoot) XXX_Size() int {
+ return m.Size()
+}
+func (m *Shoot) XXX_DiscardUnknown() {
+ xxx_messageInfo_Shoot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Shoot proto.InternalMessageInfo
+
+func (m *ShootList) Reset() { *m = ShootList{} }
+func (*ShootList) ProtoMessage() {}
+func (*ShootList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{121}
+}
+func (m *ShootList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootList.Merge(m, src)
+}
+func (m *ShootList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootList proto.InternalMessageInfo
+
+func (m *ShootMachineImage) Reset() { *m = ShootMachineImage{} }
+func (*ShootMachineImage) ProtoMessage() {}
+func (*ShootMachineImage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{122}
+}
+func (m *ShootMachineImage) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootMachineImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootMachineImage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootMachineImage.Merge(m, src)
+}
+func (m *ShootMachineImage) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootMachineImage) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootMachineImage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootMachineImage proto.InternalMessageInfo
+
+func (m *ShootNetworks) Reset() { *m = ShootNetworks{} }
+func (*ShootNetworks) ProtoMessage() {}
+func (*ShootNetworks) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{123}
+}
+func (m *ShootNetworks) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootNetworks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootNetworks) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootNetworks.Merge(m, src)
+}
+func (m *ShootNetworks) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootNetworks) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootNetworks.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootNetworks proto.InternalMessageInfo
+
+func (m *ShootSpec) Reset() { *m = ShootSpec{} }
+func (*ShootSpec) ProtoMessage() {}
+func (*ShootSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{124}
+}
+func (m *ShootSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootSpec.Merge(m, src)
+}
+func (m *ShootSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootSpec proto.InternalMessageInfo
+
+func (m *ShootState) Reset() { *m = ShootState{} }
+func (*ShootState) ProtoMessage() {}
+func (*ShootState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{125}
+}
+func (m *ShootState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootState.Merge(m, src)
+}
+func (m *ShootState) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootState) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootState proto.InternalMessageInfo
+
+func (m *ShootStateList) Reset() { *m = ShootStateList{} }
+func (*ShootStateList) ProtoMessage() {}
+func (*ShootStateList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{126}
+}
+func (m *ShootStateList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootStateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootStateList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootStateList.Merge(m, src)
+}
+func (m *ShootStateList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootStateList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootStateList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootStateList proto.InternalMessageInfo
+
+func (m *ShootStateSpec) Reset() { *m = ShootStateSpec{} }
+func (*ShootStateSpec) ProtoMessage() {}
+func (*ShootStateSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{127}
+}
+func (m *ShootStateSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootStateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootStateSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootStateSpec.Merge(m, src)
+}
+func (m *ShootStateSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootStateSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootStateSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootStateSpec proto.InternalMessageInfo
+
+func (m *ShootStatus) Reset() { *m = ShootStatus{} }
+func (*ShootStatus) ProtoMessage() {}
+func (*ShootStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{128}
+}
+func (m *ShootStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootStatus.Merge(m, src)
+}
+func (m *ShootStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootStatus proto.InternalMessageInfo
+
+func (m *Toleration) Reset() { *m = Toleration{} }
+func (*Toleration) ProtoMessage() {}
+func (*Toleration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{129}
+}
+func (m *Toleration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Toleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Toleration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Toleration.Merge(m, src)
+}
+func (m *Toleration) XXX_Size() int {
+ return m.Size()
+}
+func (m *Toleration) XXX_DiscardUnknown() {
+ xxx_messageInfo_Toleration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Toleration proto.InternalMessageInfo
+
+func (m *VerticalPodAutoscaler) Reset() { *m = VerticalPodAutoscaler{} }
+func (*VerticalPodAutoscaler) ProtoMessage() {}
+func (*VerticalPodAutoscaler) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{130}
+}
+func (m *VerticalPodAutoscaler) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *VerticalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *VerticalPodAutoscaler) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VerticalPodAutoscaler.Merge(m, src)
+}
+func (m *VerticalPodAutoscaler) XXX_Size() int {
+ return m.Size()
+}
+func (m *VerticalPodAutoscaler) XXX_DiscardUnknown() {
+ xxx_messageInfo_VerticalPodAutoscaler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VerticalPodAutoscaler proto.InternalMessageInfo
+
+func (m *Volume) Reset() { *m = Volume{} }
+func (*Volume) ProtoMessage() {}
+func (*Volume) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{131}
+}
+func (m *Volume) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Volume) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Volume.Merge(m, src)
+}
+func (m *Volume) XXX_Size() int {
+ return m.Size()
+}
+func (m *Volume) XXX_DiscardUnknown() {
+ xxx_messageInfo_Volume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Volume proto.InternalMessageInfo
+
+func (m *VolumeType) Reset() { *m = VolumeType{} }
+func (*VolumeType) ProtoMessage() {}
+func (*VolumeType) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{132}
+}
+func (m *VolumeType) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *VolumeType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *VolumeType) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VolumeType.Merge(m, src)
+}
+func (m *VolumeType) XXX_Size() int {
+ return m.Size()
+}
+func (m *VolumeType) XXX_DiscardUnknown() {
+ xxx_messageInfo_VolumeType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeType proto.InternalMessageInfo
+
+func (m *WatchCacheSizes) Reset() { *m = WatchCacheSizes{} }
+func (*WatchCacheSizes) ProtoMessage() {}
+func (*WatchCacheSizes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{133}
+}
+func (m *WatchCacheSizes) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WatchCacheSizes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *WatchCacheSizes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WatchCacheSizes.Merge(m, src)
+}
+func (m *WatchCacheSizes) XXX_Size() int {
+ return m.Size()
+}
+func (m *WatchCacheSizes) XXX_DiscardUnknown() {
+ xxx_messageInfo_WatchCacheSizes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchCacheSizes proto.InternalMessageInfo
+
+func (m *Worker) Reset() { *m = Worker{} }
+func (*Worker) ProtoMessage() {}
+func (*Worker) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{134}
+}
+func (m *Worker) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Worker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Worker) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Worker.Merge(m, src)
+}
+func (m *Worker) XXX_Size() int {
+ return m.Size()
+}
+func (m *Worker) XXX_DiscardUnknown() {
+ xxx_messageInfo_Worker.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Worker proto.InternalMessageInfo
+
+func (m *WorkerKubernetes) Reset() { *m = WorkerKubernetes{} }
+func (*WorkerKubernetes) ProtoMessage() {}
+func (*WorkerKubernetes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{135}
+}
+func (m *WorkerKubernetes) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WorkerKubernetes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *WorkerKubernetes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WorkerKubernetes.Merge(m, src)
+}
+func (m *WorkerKubernetes) XXX_Size() int {
+ return m.Size()
+}
+func (m *WorkerKubernetes) XXX_DiscardUnknown() {
+ xxx_messageInfo_WorkerKubernetes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WorkerKubernetes proto.InternalMessageInfo
+
+func (m *WorkerSystemComponents) Reset() { *m = WorkerSystemComponents{} }
+func (*WorkerSystemComponents) ProtoMessage() {}
+func (*WorkerSystemComponents) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f1caaec5647a9dbf, []int{136}
+}
+func (m *WorkerSystemComponents) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WorkerSystemComponents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *WorkerSystemComponents) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WorkerSystemComponents.Merge(m, src)
+}
+func (m *WorkerSystemComponents) XXX_Size() int {
+ return m.Size()
+}
+func (m *WorkerSystemComponents) XXX_DiscardUnknown() {
+ xxx_messageInfo_WorkerSystemComponents.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WorkerSystemComponents proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*Addon)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Addon")
+ proto.RegisterType((*Addons)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Addons")
+ proto.RegisterType((*AdmissionPlugin)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.AdmissionPlugin")
+ proto.RegisterType((*Alerting)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Alerting")
+ proto.RegisterType((*AuditConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.AuditConfig")
+ proto.RegisterType((*AuditPolicy)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.AuditPolicy")
+ proto.RegisterType((*AvailabilityZone)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.AvailabilityZone")
+ proto.RegisterType((*BackupBucket)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupBucket")
+ proto.RegisterType((*BackupBucketList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupBucketList")
+ proto.RegisterType((*BackupBucketProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupBucketProvider")
+ proto.RegisterType((*BackupBucketSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupBucketSpec")
+ proto.RegisterType((*BackupBucketStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupBucketStatus")
+ proto.RegisterType((*BackupEntry)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupEntry")
+ proto.RegisterType((*BackupEntryList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupEntryList")
+ proto.RegisterType((*BackupEntrySpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupEntrySpec")
+ proto.RegisterType((*BackupEntryStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupEntryStatus")
+ proto.RegisterType((*CRI)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.CRI")
+ proto.RegisterType((*CloudInfo)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.CloudInfo")
+ proto.RegisterType((*CloudProfile)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.CloudProfile")
+ proto.RegisterType((*CloudProfileList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.CloudProfileList")
+ proto.RegisterType((*CloudProfileSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.CloudProfileSpec")
+ proto.RegisterType((*ClusterAutoscaler)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ClusterAutoscaler")
+ proto.RegisterType((*ClusterInfo)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ClusterInfo")
+ proto.RegisterType((*Condition)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Condition")
+ proto.RegisterType((*ContainerRuntime)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ContainerRuntime")
+ proto.RegisterType((*ControllerDeployment)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerDeployment")
+ proto.RegisterType((*ControllerInstallation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerInstallation")
+ proto.RegisterType((*ControllerInstallationList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerInstallationList")
+ proto.RegisterType((*ControllerInstallationSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerInstallationSpec")
+ proto.RegisterType((*ControllerInstallationStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerInstallationStatus")
+ proto.RegisterType((*ControllerRegistration)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerRegistration")
+ proto.RegisterType((*ControllerRegistrationList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerRegistrationList")
+ proto.RegisterType((*ControllerRegistrationSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerRegistrationSpec")
+ proto.RegisterType((*ControllerResource)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerResource")
+ proto.RegisterType((*DNS)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.DNS")
+ proto.RegisterType((*DNSIncludeExclude)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.DNSIncludeExclude")
+ proto.RegisterType((*DNSProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.DNSProvider")
+ proto.RegisterType((*DataVolume)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.DataVolume")
+ proto.RegisterType((*Endpoint)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Endpoint")
+ proto.RegisterType((*ExpirableVersion)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ExpirableVersion")
+ proto.RegisterType((*Extension)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Extension")
+ proto.RegisterType((*ExtensionResourceState)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ExtensionResourceState")
+ proto.RegisterType((*Gardener)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Gardener")
+ proto.RegisterType((*GardenerResourceData)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.GardenerResourceData")
+ proto.RegisterType((*Hibernation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Hibernation")
+ proto.RegisterType((*HibernationSchedule)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.HibernationSchedule")
+ proto.RegisterType((*HorizontalPodAutoscalerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.HorizontalPodAutoscalerConfig")
+ proto.RegisterType((*Ingress)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Ingress")
+ proto.RegisterType((*IngressController)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.IngressController")
+ proto.RegisterType((*KubeAPIServerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeAPIServerConfig")
+ proto.RegisterMapType((map[string]bool)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeAPIServerConfig.RuntimeConfigEntry")
+ proto.RegisterType((*KubeAPIServerRequests)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeAPIServerRequests")
+ proto.RegisterType((*KubeControllerManagerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeControllerManagerConfig")
+ proto.RegisterType((*KubeProxyConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeProxyConfig")
+ proto.RegisterType((*KubeSchedulerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeSchedulerConfig")
+ proto.RegisterType((*KubeletConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeletConfig")
+ proto.RegisterType((*KubeletConfigEviction)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeletConfigEviction")
+ proto.RegisterType((*KubeletConfigEvictionMinimumReclaim)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeletConfigEvictionMinimumReclaim")
+ proto.RegisterType((*KubeletConfigEvictionSoftGracePeriod)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeletConfigEvictionSoftGracePeriod")
+ proto.RegisterType((*KubeletConfigReserved)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeletConfigReserved")
+ proto.RegisterType((*Kubernetes)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Kubernetes")
+ proto.RegisterType((*KubernetesConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubernetesConfig")
+ proto.RegisterMapType((map[string]bool)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubernetesConfig.FeatureGatesEntry")
+ proto.RegisterType((*KubernetesDashboard)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubernetesDashboard")
+ proto.RegisterType((*KubernetesInfo)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubernetesInfo")
+ proto.RegisterType((*KubernetesSettings)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubernetesSettings")
+ proto.RegisterType((*LastError)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.LastError")
+ proto.RegisterType((*LastOperation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.LastOperation")
+ proto.RegisterType((*Machine)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Machine")
+ proto.RegisterType((*MachineControllerManagerSettings)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MachineControllerManagerSettings")
+ proto.RegisterType((*MachineImage)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MachineImage")
+ proto.RegisterType((*MachineImageVersion)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MachineImageVersion")
+ proto.RegisterType((*MachineType)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MachineType")
+ proto.RegisterType((*MachineTypeStorage)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MachineTypeStorage")
+ proto.RegisterType((*Maintenance)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Maintenance")
+ proto.RegisterType((*MaintenanceAutoUpdate)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MaintenanceAutoUpdate")
+ proto.RegisterType((*MaintenanceTimeWindow)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MaintenanceTimeWindow")
+ proto.RegisterType((*Monitoring)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Monitoring")
+ proto.RegisterType((*NamedResourceReference)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.NamedResourceReference")
+ proto.RegisterType((*Networking)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Networking")
+ proto.RegisterType((*NginxIngress)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.NginxIngress")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.NginxIngress.ConfigEntry")
+ proto.RegisterType((*OIDCConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.OIDCConfig")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.OIDCConfig.RequiredClaimsEntry")
+ proto.RegisterType((*OpenIDConnectClientAuthentication)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.OpenIDConnectClientAuthentication")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.OpenIDConnectClientAuthentication.ExtraConfigEntry")
+ proto.RegisterType((*Plant)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Plant")
+ proto.RegisterType((*PlantList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.PlantList")
+ proto.RegisterType((*PlantSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.PlantSpec")
+ proto.RegisterType((*PlantStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.PlantStatus")
+ proto.RegisterType((*Project)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Project")
+ proto.RegisterType((*ProjectList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ProjectList")
+ proto.RegisterType((*ProjectMember)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ProjectMember")
+ proto.RegisterType((*ProjectSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ProjectSpec")
+ proto.RegisterType((*ProjectStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ProjectStatus")
+ proto.RegisterType((*ProjectTolerations)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ProjectTolerations")
+ proto.RegisterType((*Provider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Provider")
+ proto.RegisterType((*Quota)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Quota")
+ proto.RegisterType((*QuotaList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.QuotaList")
+ proto.RegisterType((*QuotaSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.QuotaSpec")
+ proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.QuotaSpec.MetricsEntry")
+ proto.RegisterType((*Region)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Region")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Region.LabelsEntry")
+ proto.RegisterType((*ResourceData)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ResourceData")
+ proto.RegisterType((*ResourceWatchCacheSize)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ResourceWatchCacheSize")
+ proto.RegisterType((*SecretBinding)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SecretBinding")
+ proto.RegisterType((*SecretBindingList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SecretBindingList")
+ proto.RegisterType((*Seed)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Seed")
+ proto.RegisterType((*SeedBackup)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedBackup")
+ proto.RegisterType((*SeedDNS)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedDNS")
+ proto.RegisterType((*SeedDNSProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedDNSProvider")
+ proto.RegisterType((*SeedList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedList")
+ proto.RegisterType((*SeedNetworks)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedNetworks")
+ proto.RegisterType((*SeedProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedProvider")
+ proto.RegisterType((*SeedSelector)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSelector")
+ proto.RegisterType((*SeedSettingExcessCapacityReservation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettingExcessCapacityReservation")
+ proto.RegisterType((*SeedSettingLoadBalancerServices)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettingLoadBalancerServices")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettingLoadBalancerServices.AnnotationsEntry")
+ proto.RegisterType((*SeedSettingScheduling)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettingScheduling")
+ proto.RegisterType((*SeedSettingShootDNS)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettingShootDNS")
+ proto.RegisterType((*SeedSettingVerticalPodAutoscaler)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettingVerticalPodAutoscaler")
+ proto.RegisterType((*SeedSettings)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettings")
+ proto.RegisterType((*SeedSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSpec")
+ proto.RegisterType((*SeedStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedStatus")
+ proto.RegisterType((*SeedTaint)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedTaint")
+ proto.RegisterType((*SeedVolume)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedVolume")
+ proto.RegisterType((*SeedVolumeProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedVolumeProvider")
+ proto.RegisterType((*ServiceAccountConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ServiceAccountConfig")
+ proto.RegisterType((*Shoot)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Shoot")
+ proto.RegisterType((*ShootList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootList")
+ proto.RegisterType((*ShootMachineImage)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootMachineImage")
+ proto.RegisterType((*ShootNetworks)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootNetworks")
+ proto.RegisterType((*ShootSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootSpec")
+ proto.RegisterType((*ShootState)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootState")
+ proto.RegisterType((*ShootStateList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootStateList")
+ proto.RegisterType((*ShootStateSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootStateSpec")
+ proto.RegisterType((*ShootStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootStatus")
+ proto.RegisterType((*Toleration)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Toleration")
+ proto.RegisterType((*VerticalPodAutoscaler)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.VerticalPodAutoscaler")
+ proto.RegisterType((*Volume)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Volume")
+ proto.RegisterType((*VolumeType)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.VolumeType")
+ proto.RegisterType((*WatchCacheSizes)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.WatchCacheSizes")
+ proto.RegisterType((*Worker)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Worker")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Worker.AnnotationsEntry")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Worker.LabelsEntry")
+ proto.RegisterType((*WorkerKubernetes)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.WorkerKubernetes")
+ proto.RegisterType((*WorkerSystemComponents)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.WorkerSystemComponents")
+}
+
+func init() {
+ proto.RegisterFile("github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto", fileDescriptor_f1caaec5647a9dbf)
+}
+
+var fileDescriptor_f1caaec5647a9dbf = []byte{
+ // 9050 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x7d, 0x6d, 0x70, 0x24, 0xc7,
+ 0x75, 0x98, 0x66, 0xf1, 0xb5, 0x78, 0x0b, 0xe0, 0x80, 0xbe, 0x0f, 0x82, 0x47, 0xf2, 0x96, 0x1a,
+ 0x89, 0x0a, 0x19, 0xc9, 0x38, 0x53, 0x94, 0x2c, 0x91, 0x22, 0x29, 0x62, 0x77, 0x71, 0x77, 0xab,
+ 0x3b, 0xdc, 0x2d, 0x7b, 0x0f, 0xa4, 0x3e, 0x52, 0xa6, 0x06, 0x33, 0x8d, 0xc5, 0x10, 0xb3, 0x33,
+ 0xcb, 0x99, 0x59, 0x1c, 0x70, 0x74, 0x22, 0x45, 0xb6, 0xe5, 0x24, 0x4e, 0x5c, 0x71, 0x59, 0x29,
+ 0xa5, 0x22, 0x95, 0x6d, 0xb9, 0x52, 0x29, 0xa7, 0xf2, 0x21, 0x97, 0x2b, 0x56, 0xca, 0x95, 0x94,
+ 0x7f, 0xd8, 0x3f, 0xac, 0xc8, 0x29, 0x47, 0x95, 0x72, 0x5c, 0x8a, 0xcb, 0x81, 0x22, 0xa4, 0x2a,
+ 0xdf, 0xc9, 0x9f, 0x54, 0xfe, 0x5c, 0x39, 0x29, 0x57, 0x7f, 0x4c, 0x4f, 0xcf, 0xec, 0x0c, 0xb0,
+ 0x98, 0xc1, 0x81, 0xe4, 0x2f, 0x60, 0xfb, 0x75, 0xbf, 0xd7, 0xd3, 0xfd, 0xfa, 0xf5, 0x7b, 0xaf,
+ 0x5f, 0xbf, 0x86, 0x66, 0xcf, 0x0e, 0xb7, 0x87, 0x9b, 0x2b, 0xa6, 0xd7, 0xbf, 0xda, 0x33, 0x7c,
+ 0x8b, 0xb8, 0xc4, 0x8f, 0xff, 0x19, 0xec, 0xf4, 0xae, 0x1a, 0x03, 0x3b, 0xb8, 0x6a, 0x7a, 0x3e,
+ 0xb9, 0xba, 0xfb, 0xac, 0xe1, 0x0c, 0xb6, 0x8d, 0x67, 0xaf, 0xf6, 0x28, 0xd0, 0x08, 0x89, 0xb5,
+ 0x32, 0xf0, 0xbd, 0xd0, 0x43, 0xcf, 0xc5, 0x48, 0x56, 0xa2, 0xb6, 0xf1, 0x3f, 0x83, 0x9d, 0xde,
+ 0x0a, 0x45, 0xb2, 0x42, 0x91, 0xac, 0x44, 0x48, 0x2e, 0x37, 0x4e, 0x44, 0x79, 0x93, 0x84, 0xa3,
+ 0x84, 0x2f, 0xff, 0x98, 0x8a, 0xc3, 0xeb, 0x79, 0x57, 0x59, 0xf1, 0xe6, 0x70, 0x8b, 0xfd, 0x62,
+ 0x3f, 0xd8, 0x7f, 0xa2, 0xfa, 0x33, 0x3b, 0x9f, 0x0c, 0x56, 0x6c, 0x8f, 0x22, 0xbe, 0x6a, 0x0c,
+ 0x43, 0x2f, 0x30, 0x0d, 0xc7, 0x76, 0x7b, 0x57, 0x77, 0x47, 0x31, 0xeb, 0x4a, 0x55, 0xd1, 0x85,
+ 0x23, 0xeb, 0xf8, 0x9b, 0x86, 0x99, 0x55, 0xe7, 0x63, 0x71, 0x9d, 0xbe, 0x61, 0x6e, 0xdb, 0x2e,
+ 0xf1, 0xf7, 0xa3, 0x8f, 0xbb, 0xea, 0x93, 0xc0, 0x1b, 0xfa, 0x26, 0x39, 0x51, 0xab, 0xe0, 0x6a,
+ 0x9f, 0x84, 0x46, 0x16, 0xad, 0xab, 0x79, 0xad, 0xfc, 0xa1, 0x1b, 0xda, 0xfd, 0x51, 0x32, 0x3f,
+ 0x71, 0x5c, 0x83, 0xc0, 0xdc, 0x26, 0x7d, 0x63, 0xa4, 0xdd, 0x73, 0x79, 0xed, 0x86, 0xa1, 0xed,
+ 0x5c, 0xb5, 0xdd, 0x30, 0x08, 0xfd, 0x74, 0x23, 0xfd, 0xa3, 0x30, 0xb5, 0x6a, 0x59, 0x9e, 0x8b,
+ 0x9e, 0x81, 0x19, 0xe2, 0x1a, 0x9b, 0x0e, 0xb1, 0x96, 0xb5, 0x27, 0xb5, 0xa7, 0xab, 0x8d, 0x73,
+ 0xdf, 0x3d, 0xa8, 0xbf, 0xef, 0xf0, 0xa0, 0x3e, 0xb3, 0xc6, 0x8b, 0x71, 0x04, 0xd7, 0xbf, 0x5e,
+ 0x81, 0x69, 0xd6, 0x28, 0x40, 0xbf, 0xa4, 0xc1, 0xf9, 0x9d, 0xe1, 0x26, 0xf1, 0x5d, 0x12, 0x92,
+ 0xa0, 0x65, 0x04, 0xdb, 0x9b, 0x9e, 0xe1, 0x73, 0x14, 0xb5, 0x8f, 0xde, 0x58, 0x29, 0xc0, 0x82,
+ 0x2b, 0x37, 0x47, 0xf1, 0x35, 0x1e, 0x39, 0x3c, 0xa8, 0x9f, 0xcf, 0x00, 0xe0, 0x2c, 0xea, 0xe8,
+ 0x1e, 0xcc, 0xb9, 0x3d, 0xdb, 0xdd, 0x6b, 0xbb, 0x3d, 0x9f, 0x04, 0xc1, 0x72, 0x85, 0xf5, 0x66,
+ 0xb5, 0x50, 0x6f, 0x6e, 0x2b, 0x88, 0x1a, 0x8b, 0x87, 0x07, 0xf5, 0x39, 0xb5, 0x04, 0x27, 0x08,
+ 0xe9, 0x5f, 0xd5, 0xe0, 0xdc, 0xaa, 0xd5, 0xb7, 0x83, 0xc0, 0xf6, 0xdc, 0x8e, 0x33, 0xec, 0xd9,
+ 0x2e, 0x7a, 0x12, 0x26, 0x5d, 0xa3, 0x4f, 0xd8, 0x90, 0xcc, 0x36, 0xe6, 0xc4, 0xa8, 0x4e, 0xde,
+ 0x36, 0xfa, 0x04, 0x33, 0x08, 0x7a, 0x15, 0xa6, 0x4d, 0xcf, 0xdd, 0xb2, 0x7b, 0xa2, 0xa3, 0x3f,
+ 0xb6, 0xc2, 0x67, 0x72, 0x45, 0x9d, 0x49, 0xd6, 0x3f, 0xc1, 0x01, 0x2b, 0xd8, 0xb8, 0xb7, 0xb6,
+ 0x17, 0x12, 0x97, 0x92, 0x69, 0xc0, 0xe1, 0x41, 0x7d, 0xba, 0xc9, 0x10, 0x60, 0x81, 0x48, 0xbf,
+ 0x06, 0xd5, 0x55, 0x87, 0xf8, 0xa1, 0xed, 0xf6, 0xd0, 0x0b, 0xb0, 0x40, 0xfa, 0x86, 0xed, 0x60,
+ 0x62, 0x12, 0x7b, 0x97, 0xf8, 0xc1, 0xb2, 0xf6, 0xe4, 0xc4, 0xd3, 0xb3, 0x0d, 0x74, 0x78, 0x50,
+ 0x5f, 0x58, 0x4b, 0x40, 0x70, 0xaa, 0xa6, 0xfe, 0x15, 0x0d, 0x6a, 0xab, 0x43, 0xcb, 0x0e, 0x39,
+ 0x7e, 0x14, 0x40, 0xcd, 0xa0, 0x3f, 0x3b, 0x9e, 0x63, 0x9b, 0xfb, 0x62, 0x9a, 0x5f, 0x29, 0x34,
+ 0xb0, 0xab, 0x31, 0x9e, 0xc6, 0xb9, 0xc3, 0x83, 0x7a, 0x4d, 0x29, 0xc0, 0x2a, 0x15, 0x7d, 0x1b,
+ 0x54, 0x18, 0xfa, 0x1c, 0xcc, 0xf1, 0xaf, 0x5c, 0x37, 0x06, 0x98, 0x6c, 0x89, 0x4e, 0x7c, 0x40,
+ 0x19, 0xb4, 0x88, 0xd2, 0xca, 0x9d, 0xcd, 0x37, 0x89, 0x19, 0x62, 0xb2, 0x45, 0x7c, 0xe2, 0x9a,
+ 0x84, 0xcf, 0x5f, 0x53, 0x69, 0x8c, 0x13, 0xa8, 0xf4, 0x1f, 0x6a, 0xb0, 0xb8, 0xba, 0x6b, 0xd8,
+ 0x8e, 0xb1, 0x69, 0x3b, 0x76, 0xb8, 0xff, 0x79, 0xcf, 0x25, 0x63, 0x4c, 0xe0, 0x06, 0x3c, 0x32,
+ 0x74, 0x0d, 0xde, 0xce, 0x21, 0xeb, 0x7c, 0xca, 0xee, 0xee, 0x0f, 0x08, 0x65, 0x3d, 0x3a, 0xd4,
+ 0x8f, 0x1d, 0x1e, 0xd4, 0x1f, 0xd9, 0xc8, 0xae, 0x82, 0xf3, 0xda, 0x22, 0x0c, 0x97, 0x14, 0xd0,
+ 0x6b, 0x9e, 0x33, 0xec, 0x0b, 0xac, 0x13, 0x0c, 0xeb, 0xe5, 0xc3, 0x83, 0xfa, 0xa5, 0x8d, 0xcc,
+ 0x1a, 0x38, 0xa7, 0xa5, 0xfe, 0xbd, 0x0a, 0xcc, 0x35, 0x0c, 0x73, 0x67, 0x38, 0x68, 0x0c, 0xcd,
+ 0x1d, 0x12, 0xa2, 0x2f, 0x42, 0x95, 0x4a, 0x2e, 0xcb, 0x08, 0x0d, 0x31, 0x92, 0x3f, 0x9e, 0xcb,
+ 0x7e, 0x6c, 0x16, 0x69, 0xed, 0x78, 0x6c, 0xd7, 0x49, 0x68, 0x34, 0x90, 0x18, 0x13, 0x88, 0xcb,
+ 0xb0, 0xc4, 0x8a, 0x7a, 0x30, 0x19, 0x0c, 0x88, 0x29, 0x98, 0x7b, 0xad, 0x10, 0xb3, 0xa8, 0x5d,
+ 0xee, 0x0e, 0x88, 0x19, 0x4f, 0x03, 0xfd, 0x85, 0x19, 0x01, 0xe4, 0xc1, 0x74, 0x10, 0x1a, 0xe1,
+ 0x90, 0x8e, 0x0f, 0x25, 0x75, 0xbd, 0x3c, 0x29, 0x86, 0xae, 0xb1, 0x20, 0x88, 0x4d, 0xf3, 0xdf,
+ 0x58, 0x90, 0xd1, 0x7f, 0xa0, 0xc1, 0xa2, 0x5a, 0xfd, 0x96, 0x1d, 0x84, 0xe8, 0x2f, 0x8d, 0x0c,
+ 0xe8, 0xca, 0x78, 0x03, 0x4a, 0x5b, 0xb3, 0xe1, 0x5c, 0x14, 0xe4, 0xaa, 0x51, 0x89, 0x32, 0x98,
+ 0x5b, 0x30, 0x65, 0x87, 0xa4, 0xcf, 0x19, 0xab, 0xa8, 0x4c, 0x53, 0xfb, 0xdc, 0x98, 0x17, 0xd4,
+ 0xa6, 0xda, 0x14, 0x2f, 0xe6, 0xe8, 0xf5, 0x2f, 0xc2, 0x05, 0xb5, 0x56, 0xc7, 0xf7, 0x76, 0x6d,
+ 0x8b, 0xf8, 0x74, 0x31, 0x84, 0xfb, 0x83, 0x91, 0xc5, 0x40, 0x99, 0x0b, 0x33, 0x08, 0xfa, 0x10,
+ 0x4c, 0xfb, 0xa4, 0x67, 0x7b, 0x2e, 0x9b, 0xf0, 0xd9, 0x78, 0xf0, 0x30, 0x2b, 0xc5, 0x02, 0xaa,
+ 0xff, 0xf7, 0x4a, 0x72, 0xf0, 0xe8, 0x44, 0xa2, 0x7b, 0x50, 0x1d, 0x08, 0x52, 0x62, 0xf0, 0xda,
+ 0xa5, 0xbf, 0x30, 0xea, 0x7b, 0x3c, 0xae, 0x51, 0x09, 0x96, 0xc4, 0x90, 0x0d, 0x0b, 0xd1, 0xff,
+ 0xcd, 0x12, 0xb2, 0x98, 0xc9, 0xd4, 0x4e, 0x02, 0x11, 0x4e, 0x21, 0x46, 0x77, 0x61, 0x36, 0x20,
+ 0xa6, 0x4f, 0xa8, 0x5c, 0x12, 0x9c, 0x9a, 0x29, 0xbc, 0xba, 0x51, 0x25, 0x21, 0xbc, 0x96, 0x44,
+ 0xf7, 0x67, 0x25, 0x00, 0xc7, 0x88, 0xd0, 0xe3, 0x30, 0x19, 0x10, 0x62, 0x2d, 0x4f, 0xb2, 0x41,
+ 0xaf, 0xb2, 0xa5, 0x41, 0x88, 0x85, 0x59, 0xa9, 0xfe, 0x6b, 0x93, 0x80, 0x46, 0x19, 0x5b, 0xfd,
+ 0x6a, 0x5e, 0x22, 0x06, 0xbd, 0xcc, 0x57, 0x8b, 0x35, 0x92, 0x42, 0x8c, 0xde, 0x86, 0x79, 0xc7,
+ 0x08, 0xc2, 0x3b, 0x03, 0xaa, 0x7e, 0x44, 0xdc, 0x51, 0xfb, 0x68, 0xa3, 0xd0, 0xf4, 0xde, 0x52,
+ 0x31, 0x35, 0x96, 0x0e, 0x0f, 0xea, 0xf3, 0x89, 0x22, 0x9c, 0xa4, 0x85, 0x76, 0x60, 0x96, 0x16,
+ 0xac, 0xf9, 0xbe, 0xe7, 0x8b, 0x21, 0x7f, 0xb9, 0x30, 0x61, 0x86, 0xa5, 0x31, 0x4f, 0x67, 0x42,
+ 0xfe, 0xc4, 0x31, 0x7e, 0xf4, 0x19, 0x40, 0xde, 0x66, 0x40, 0xfc, 0x5d, 0x62, 0x5d, 0xe7, 0xda,
+ 0x16, 0xfd, 0x5c, 0x3a, 0x2f, 0x13, 0x8d, 0xcb, 0x62, 0x0e, 0xd1, 0x9d, 0x91, 0x1a, 0x38, 0xa3,
+ 0x15, 0xda, 0x01, 0x24, 0x35, 0x36, 0x39, 0xed, 0xcb, 0x53, 0xe3, 0x33, 0xcd, 0x25, 0x4a, 0xec,
+ 0xfa, 0x08, 0x0a, 0x9c, 0x81, 0x56, 0xff, 0xfd, 0x0a, 0xd4, 0x38, 0x93, 0xac, 0xb9, 0xa1, 0xbf,
+ 0x7f, 0x06, 0x5b, 0xc3, 0x56, 0x62, 0x6b, 0x68, 0x95, 0x58, 0xea, 0xac, 0xc7, 0xb9, 0x3b, 0x83,
+ 0x9b, 0xda, 0x19, 0xae, 0x95, 0xa6, 0x74, 0xf4, 0xc6, 0xf0, 0xc7, 0x1a, 0x9c, 0x53, 0x6a, 0x9f,
+ 0xc1, 0xbe, 0x40, 0x92, 0xfb, 0xc2, 0x2b, 0x65, 0x3f, 0x30, 0x67, 0x5b, 0x30, 0x13, 0xdf, 0xc5,
+ 0x44, 0xf6, 0x47, 0x01, 0x36, 0x99, 0x4c, 0xb9, 0x1d, 0x2b, 0x49, 0x72, 0xd6, 0x1b, 0x12, 0x82,
+ 0x95, 0x5a, 0x52, 0x58, 0x55, 0x32, 0x85, 0xd5, 0xbf, 0xaa, 0xc0, 0xd2, 0xc8, 0x58, 0x8f, 0x0a,
+ 0x10, 0xed, 0x9d, 0x12, 0x20, 0x95, 0x77, 0x44, 0x80, 0x4c, 0x14, 0x11, 0x20, 0xfa, 0xef, 0x69,
+ 0x30, 0xd1, 0xc4, 0x6d, 0xf4, 0xe1, 0x84, 0x12, 0xfb, 0x88, 0xaa, 0xc4, 0x3e, 0x38, 0xa8, 0xcf,
+ 0x34, 0x71, 0x5b, 0xd1, 0x67, 0x7f, 0x41, 0x83, 0x25, 0xd3, 0x73, 0x43, 0x83, 0x72, 0x24, 0xe6,
+ 0x12, 0x3f, 0xe2, 0xac, 0x62, 0xfa, 0x5b, 0x33, 0x85, 0xad, 0xf1, 0xa8, 0xe8, 0xc1, 0x52, 0x1a,
+ 0x12, 0xe0, 0x51, 0xd2, 0xfa, 0x06, 0xcc, 0x36, 0x1d, 0x6f, 0x68, 0xb5, 0xdd, 0x2d, 0xef, 0x14,
+ 0x55, 0x90, 0x7f, 0xaf, 0xc1, 0x1c, 0xc3, 0xdb, 0xf1, 0xbd, 0x2d, 0xdb, 0x21, 0xef, 0x11, 0x65,
+ 0x58, 0xed, 0x72, 0x9e, 0xc8, 0x63, 0xba, 0xa9, 0x5a, 0xf1, 0xbd, 0xa2, 0x9b, 0xaa, 0x7d, 0xce,
+ 0x11, 0x42, 0xdf, 0x98, 0x49, 0x7e, 0x1a, 0x13, 0x43, 0x4f, 0x43, 0xd5, 0x34, 0x1a, 0x43, 0xd7,
+ 0x72, 0x24, 0x67, 0xd0, 0x6e, 0x36, 0x57, 0x79, 0x19, 0x96, 0x50, 0xf4, 0x36, 0x40, 0xec, 0x34,
+ 0x10, 0x13, 0x71, 0xbd, 0xa4, 0xa7, 0xa2, 0x4b, 0x42, 0x6a, 0x6b, 0x07, 0xf1, 0xec, 0xc7, 0x30,
+ 0xac, 0x90, 0x43, 0x7f, 0x05, 0xe6, 0xc5, 0x30, 0xb7, 0xfb, 0x46, 0x4f, 0x98, 0x72, 0x45, 0xc7,
+ 0x6a, 0x5d, 0xc1, 0xd4, 0xb8, 0x28, 0x28, 0xcf, 0xab, 0xa5, 0x01, 0x4e, 0x92, 0x43, 0xf7, 0x61,
+ 0xae, 0xaf, 0xda, 0xa7, 0x93, 0x25, 0xb6, 0x0b, 0xc5, 0x58, 0x6d, 0x5c, 0x10, 0xd4, 0xe7, 0x12,
+ 0xa6, 0x6d, 0x82, 0x56, 0x86, 0x8e, 0x3d, 0xf5, 0xb0, 0x74, 0xec, 0x2d, 0x98, 0xe1, 0x6b, 0x3c,
+ 0x58, 0x9e, 0x66, 0x5f, 0xf8, 0xa9, 0x42, 0x5f, 0xc8, 0xe5, 0x45, 0xec, 0x0a, 0xe3, 0xbf, 0x03,
+ 0x1c, 0x21, 0x47, 0xf7, 0x60, 0x8e, 0x6e, 0x59, 0x5d, 0xe2, 0x10, 0x33, 0xf4, 0xfc, 0xe5, 0x99,
+ 0x12, 0x9e, 0xa6, 0xae, 0x82, 0x88, 0x7b, 0x2a, 0xd4, 0x12, 0x9c, 0x20, 0x24, 0x85, 0x60, 0x35,
+ 0x57, 0x08, 0xee, 0x42, 0x6d, 0x57, 0x71, 0x19, 0xcc, 0xb2, 0x61, 0xf8, 0x74, 0xa1, 0x9e, 0xc5,
+ 0x0e, 0x84, 0xc6, 0x79, 0x41, 0xa9, 0xa6, 0x3a, 0x1b, 0x54, 0x42, 0xfa, 0xcf, 0x4d, 0xc3, 0x52,
+ 0xd3, 0x19, 0x06, 0x21, 0xf1, 0x57, 0x85, 0x37, 0x97, 0xf8, 0xe8, 0x2b, 0x1a, 0x5c, 0x62, 0xff,
+ 0xb6, 0xbc, 0x7b, 0x6e, 0x8b, 0x38, 0xc6, 0xfe, 0xea, 0x16, 0xad, 0x61, 0x59, 0x27, 0x13, 0x44,
+ 0xad, 0xa1, 0xd8, 0xb3, 0x99, 0xf3, 0xa3, 0x9b, 0x89, 0x11, 0xe7, 0x50, 0x42, 0x3f, 0xaf, 0xc1,
+ 0xa3, 0x19, 0xa0, 0x16, 0x71, 0x48, 0x48, 0x84, 0x24, 0x38, 0x69, 0x3f, 0x9e, 0x38, 0x3c, 0xa8,
+ 0x3f, 0xda, 0xcd, 0x43, 0x8a, 0xf3, 0xe9, 0xd1, 0x5d, 0xf6, 0x72, 0x06, 0xf4, 0x9a, 0x61, 0x3b,
+ 0x43, 0x9f, 0x08, 0x4d, 0xf5, 0xa4, 0xdd, 0xb9, 0x72, 0x78, 0x50, 0xbf, 0xdc, 0xcd, 0xc5, 0x8a,
+ 0x8f, 0xa0, 0x88, 0xbe, 0x04, 0x17, 0x25, 0x74, 0xc3, 0x75, 0x09, 0xb1, 0x88, 0x75, 0xd7, 0xee,
+ 0x13, 0x66, 0xbb, 0x9c, 0xbc, 0x2b, 0x8f, 0x1e, 0x1e, 0xd4, 0x2f, 0x76, 0xb3, 0x10, 0xe2, 0x6c,
+ 0x3a, 0xa8, 0x07, 0x4f, 0xc4, 0x80, 0xd0, 0x76, 0xec, 0xfb, 0x0c, 0xd3, 0xdd, 0x6d, 0x9f, 0x04,
+ 0xdb, 0x9e, 0x63, 0x31, 0x79, 0xa1, 0x35, 0xde, 0x7f, 0x78, 0x50, 0x7f, 0xa2, 0x7b, 0x54, 0x45,
+ 0x7c, 0x34, 0x1e, 0x64, 0xc1, 0x5c, 0x60, 0x1a, 0x6e, 0xdb, 0x0d, 0x89, 0xbf, 0x6b, 0x38, 0xcb,
+ 0xd3, 0x85, 0x3e, 0x90, 0xaf, 0x51, 0x05, 0x0f, 0x4e, 0x60, 0xd5, 0xff, 0x87, 0x06, 0x35, 0xb1,
+ 0x12, 0x98, 0xe2, 0x62, 0xc2, 0x94, 0x49, 0xb7, 0x2d, 0xc1, 0xf1, 0x2f, 0x17, 0xdf, 0x1f, 0x29,
+ 0xba, 0x78, 0x73, 0x64, 0x45, 0x98, 0xe3, 0x46, 0xf7, 0x32, 0x76, 0xb7, 0x66, 0xc9, 0xdd, 0x8d,
+ 0x91, 0x3b, 0x66, 0x67, 0xd3, 0x0f, 0x26, 0x60, 0xb6, 0xe9, 0xb9, 0x96, 0xcd, 0x14, 0xe6, 0x67,
+ 0x13, 0x4a, 0xda, 0x13, 0xaa, 0x7c, 0x7a, 0x70, 0x50, 0x9f, 0x97, 0x15, 0x15, 0x81, 0xf5, 0xbc,
+ 0x34, 0xd2, 0xb8, 0xd6, 0xf6, 0xfe, 0xa4, 0x71, 0xf5, 0xe0, 0xa0, 0x7e, 0x4e, 0x36, 0x4b, 0xda,
+ 0x5b, 0x68, 0x17, 0x10, 0x55, 0x9f, 0xef, 0xfa, 0x86, 0x1b, 0x70, 0xb4, 0x94, 0x6d, 0xf9, 0x0a,
+ 0xfa, 0x8b, 0xe3, 0xcd, 0x2a, 0x6d, 0x11, 0x6b, 0xd7, 0xb7, 0x46, 0xb0, 0xe1, 0x0c, 0x0a, 0xe8,
+ 0x4d, 0x58, 0xa0, 0xa5, 0x1b, 0x03, 0xcb, 0x08, 0x89, 0xb2, 0x54, 0x4e, 0x42, 0xf3, 0x92, 0xa0,
+ 0xb9, 0x70, 0x2b, 0x81, 0x09, 0xa7, 0x30, 0x73, 0xa5, 0xd6, 0x08, 0x3c, 0x97, 0xad, 0x82, 0x84,
+ 0x52, 0x4b, 0x4b, 0xb1, 0x80, 0xa2, 0x67, 0x60, 0xa6, 0x4f, 0x82, 0xc0, 0xe8, 0x11, 0xc6, 0xd6,
+ 0xb3, 0xf1, 0xee, 0xb5, 0xce, 0x8b, 0x71, 0x04, 0x47, 0x1f, 0x81, 0x29, 0xd3, 0xb3, 0x48, 0xb0,
+ 0x3c, 0xc3, 0xfc, 0xc9, 0x97, 0x18, 0x33, 0xd1, 0x82, 0x07, 0x07, 0xf5, 0x59, 0x66, 0x8b, 0xd0,
+ 0x5f, 0x98, 0x57, 0xd2, 0x7f, 0x85, 0x6a, 0x94, 0x29, 0xd5, 0x3c, 0xcf, 0x39, 0xce, 0xa7, 0x95,
+ 0x19, 0x13, 0x67, 0xe7, 0x59, 0xd3, 0xbf, 0x5f, 0x81, 0x0b, 0xb4, 0x87, 0xbe, 0xe7, 0x38, 0x54,
+ 0xcc, 0x0e, 0x1c, 0x6f, 0xbf, 0x4f, 0xdc, 0x70, 0x0c, 0x93, 0xe1, 0x0c, 0xfd, 0x7f, 0xaf, 0xc0,
+ 0xf4, 0x80, 0x1f, 0x9f, 0x4c, 0xb0, 0xee, 0x3c, 0x4d, 0x27, 0x91, 0x9f, 0x6d, 0x3c, 0x38, 0xa8,
+ 0x5f, 0xce, 0xfa, 0x00, 0x71, 0x2a, 0x22, 0xda, 0x21, 0x3b, 0xa5, 0x75, 0x70, 0xa6, 0x7b, 0x6e,
+ 0x4c, 0x55, 0xde, 0xd8, 0x24, 0xce, 0xb8, 0x7a, 0x86, 0xfe, 0x5f, 0x2b, 0x70, 0x29, 0xee, 0x51,
+ 0xdb, 0x0d, 0x42, 0xc3, 0x71, 0xb8, 0x4d, 0xfc, 0xf0, 0x8d, 0xa5, 0xb7, 0x12, 0xc6, 0xd2, 0x9d,
+ 0xc2, 0x96, 0xe7, 0x68, 0xe7, 0x73, 0x3d, 0x45, 0xfb, 0x29, 0x4f, 0xd1, 0xab, 0xa7, 0x49, 0xf4,
+ 0x68, 0xa7, 0xd1, 0xff, 0xd4, 0xe0, 0x72, 0x76, 0xc3, 0x33, 0xb0, 0xdd, 0x06, 0x49, 0xdb, 0xed,
+ 0xe6, 0x29, 0x7e, 0x76, 0x8e, 0x15, 0xf7, 0x47, 0xb9, 0x9f, 0xcb, 0xec, 0xb9, 0x2d, 0x38, 0x47,
+ 0x95, 0xec, 0x20, 0x14, 0xce, 0x8d, 0x93, 0x1d, 0xf4, 0x45, 0x0e, 0x8e, 0x73, 0x38, 0x89, 0x03,
+ 0xa7, 0x91, 0xa2, 0xdb, 0x30, 0x43, 0x19, 0x9e, 0xe2, 0xaf, 0x8c, 0x8f, 0x5f, 0xca, 0xd4, 0x2e,
+ 0x6f, 0x8b, 0x23, 0x24, 0xfa, 0xff, 0xd3, 0xe0, 0xf1, 0xa3, 0xa6, 0x1f, 0xf9, 0x00, 0x66, 0xb4,
+ 0x8d, 0xf1, 0xa3, 0xd8, 0xc2, 0xaa, 0x40, 0x84, 0x26, 0x5e, 0x46, 0xb2, 0x28, 0xc0, 0x0a, 0x95,
+ 0x0c, 0x3f, 0x7f, 0xe5, 0x21, 0xf9, 0xf9, 0xf5, 0xff, 0xa5, 0xa9, 0x02, 0x43, 0x1d, 0xfe, 0xf7,
+ 0x9c, 0xc0, 0x50, 0x3b, 0x9f, 0xeb, 0x67, 0x49, 0xae, 0x5a, 0xb5, 0xc9, 0x7b, 0x6f, 0xd5, 0xaa,
+ 0xbd, 0xcf, 0x59, 0xb5, 0xbf, 0x58, 0xc9, 0xfb, 0x5c, 0xb6, 0x6a, 0xf7, 0x60, 0x36, 0x0a, 0x9f,
+ 0x89, 0x78, 0xfb, 0x7a, 0xe9, 0x4e, 0x71, 0x7c, 0xf1, 0xf9, 0x57, 0x54, 0x12, 0xe0, 0x98, 0x18,
+ 0xda, 0x07, 0xb0, 0xe4, 0x7e, 0x29, 0x18, 0xa0, 0x5d, 0x92, 0x74, 0xbc, 0x01, 0x37, 0x16, 0x28,
+ 0xcf, 0xc5, 0xbf, 0xb1, 0x42, 0x4c, 0xff, 0x8d, 0x0a, 0xa0, 0xd1, 0xfe, 0x52, 0xa5, 0x63, 0xc7,
+ 0x76, 0xad, 0xb4, 0xd2, 0x71, 0xd3, 0x76, 0x2d, 0xcc, 0x20, 0x52, 0x2d, 0xa9, 0xe4, 0xaa, 0x25,
+ 0x2f, 0xc1, 0xb9, 0x9e, 0xe3, 0x6d, 0x1a, 0x8e, 0xb3, 0x2f, 0xc2, 0x70, 0xd8, 0xbe, 0x54, 0x6d,
+ 0x9c, 0xa7, 0xc2, 0xed, 0x7a, 0x12, 0x84, 0xd3, 0x75, 0xd1, 0x00, 0x16, 0x7d, 0x62, 0x7a, 0xae,
+ 0x69, 0x3b, 0x4c, 0x89, 0xf4, 0x86, 0x61, 0x41, 0x63, 0xee, 0xc2, 0xe1, 0x41, 0x7d, 0x11, 0xa7,
+ 0x70, 0xe1, 0x11, 0xec, 0xe8, 0x29, 0x98, 0x19, 0xf8, 0x76, 0xdf, 0xf0, 0xf7, 0x99, 0x9a, 0x5a,
+ 0x6d, 0xd4, 0xa8, 0x94, 0xec, 0xf0, 0x22, 0x1c, 0xc1, 0xf4, 0x6f, 0x6a, 0x30, 0xd1, 0xba, 0xdd,
+ 0x45, 0x3a, 0x4c, 0x5b, 0x5e, 0xdf, 0xb0, 0x5d, 0x31, 0x4a, 0x2c, 0x96, 0xa5, 0xc5, 0x4a, 0xb0,
+ 0x80, 0xa0, 0xb7, 0x60, 0x36, 0x92, 0x31, 0xe5, 0x8e, 0x37, 0x5a, 0xb7, 0xbb, 0xf2, 0x2c, 0x58,
+ 0x32, 0x53, 0x54, 0x12, 0xe0, 0x98, 0x8a, 0x6e, 0xc0, 0x52, 0xeb, 0x76, 0xb7, 0xed, 0x9a, 0xce,
+ 0xd0, 0x22, 0x6b, 0x7b, 0xec, 0x0f, 0xfd, 0x34, 0x9b, 0x97, 0x88, 0x00, 0x1a, 0xf6, 0x69, 0xa2,
+ 0x12, 0x8e, 0x60, 0xb4, 0x1a, 0xe1, 0x2d, 0x44, 0xf0, 0x07, 0xab, 0x26, 0x90, 0xe0, 0x08, 0xa6,
+ 0xff, 0x49, 0x05, 0x6a, 0x4a, 0x87, 0x50, 0x1f, 0x66, 0xf8, 0xf7, 0x46, 0x67, 0xb0, 0xd7, 0x8a,
+ 0x7e, 0x63, 0xb2, 0xdb, 0x9c, 0x3c, 0x1f, 0xd2, 0x00, 0x47, 0x34, 0xd4, 0x79, 0xaa, 0xe4, 0xcf,
+ 0x13, 0x5a, 0x01, 0xe0, 0x47, 0xcc, 0xec, 0x70, 0x87, 0xeb, 0xab, 0x6c, 0x29, 0x74, 0x65, 0x29,
+ 0x56, 0x6a, 0xa0, 0xc7, 0x05, 0x47, 0x2b, 0xa7, 0xd0, 0x0a, 0x37, 0xf7, 0x60, 0xea, 0xbe, 0xe7,
+ 0x92, 0x40, 0xf8, 0xfd, 0x4e, 0xeb, 0x0b, 0x67, 0xa9, 0x94, 0xfa, 0x3c, 0x45, 0x8c, 0x39, 0x7e,
+ 0xfd, 0x5b, 0x1a, 0x40, 0xcb, 0x08, 0x0d, 0xee, 0xa4, 0x1a, 0x23, 0x82, 0xe7, 0xf1, 0xc4, 0x4a,
+ 0xac, 0x8e, 0x9c, 0x27, 0x4c, 0x06, 0xf6, 0xfd, 0xe8, 0xfb, 0xe5, 0x16, 0xc4, 0xb1, 0x77, 0xed,
+ 0xfb, 0x04, 0x33, 0x38, 0xfa, 0x30, 0xcc, 0x12, 0xd7, 0xf4, 0xf7, 0x07, 0xa1, 0x38, 0x88, 0xaf,
+ 0xf2, 0x53, 0x9e, 0xb5, 0xa8, 0x10, 0xc7, 0x70, 0x7d, 0x17, 0xaa, 0x6b, 0xae, 0x35, 0xf0, 0x6c,
+ 0x6e, 0x9f, 0x1c, 0xd3, 0xc1, 0x27, 0x60, 0x62, 0xe8, 0x3b, 0xa2, 0x7f, 0x35, 0x51, 0x61, 0x62,
+ 0x03, 0xdf, 0xc2, 0xb4, 0x9c, 0x1a, 0x7d, 0x83, 0xa1, 0x3f, 0xf0, 0x82, 0xa8, 0x93, 0x52, 0x41,
+ 0xe9, 0xf0, 0x62, 0x1c, 0xc1, 0xf5, 0x07, 0x1a, 0x2c, 0xae, 0xed, 0x0d, 0x6c, 0x9f, 0x85, 0x06,
+ 0x11, 0x9f, 0xee, 0xec, 0xb4, 0xfd, 0x2e, 0xff, 0x57, 0xf4, 0x41, 0xb6, 0x17, 0x35, 0x70, 0x04,
+ 0x47, 0x5b, 0xb0, 0x40, 0x58, 0x73, 0x2a, 0x17, 0x5a, 0x86, 0x74, 0x9c, 0x9d, 0xc4, 0xe6, 0xe5,
+ 0xa1, 0x67, 0x09, 0x2c, 0x38, 0x85, 0x15, 0x75, 0x61, 0xc1, 0x74, 0x8c, 0x20, 0xb0, 0xb7, 0x6c,
+ 0x33, 0x3e, 0x01, 0x9b, 0x6d, 0x7c, 0x98, 0xb6, 0x6d, 0x26, 0x20, 0x0f, 0x0e, 0xea, 0x17, 0x45,
+ 0x3f, 0x93, 0x00, 0x9c, 0x42, 0xa1, 0xff, 0xae, 0x06, 0xb3, 0x52, 0x9f, 0x79, 0x77, 0x99, 0x85,
+ 0x4f, 0x43, 0xd5, 0xb2, 0x03, 0x55, 0xc6, 0xb3, 0x03, 0x8c, 0x96, 0x28, 0xc3, 0x12, 0xaa, 0xff,
+ 0xeb, 0x0a, 0x5c, 0x92, 0xb8, 0xa3, 0xed, 0x86, 0xaa, 0x5f, 0xe3, 0xec, 0x39, 0x8f, 0x0b, 0x56,
+ 0x53, 0x38, 0x5d, 0x61, 0xb3, 0xa7, 0xd2, 0x7c, 0x54, 0xcb, 0xe2, 0x21, 0x74, 0x1b, 0xa6, 0xa8,
+ 0xd1, 0x12, 0xb9, 0x3b, 0x4e, 0x38, 0x1a, 0x6c, 0xbd, 0xb2, 0xfe, 0x62, 0x8e, 0x06, 0xbd, 0xad,
+ 0xaa, 0x0d, 0x53, 0x4c, 0xc4, 0x7f, 0xe6, 0x64, 0xc2, 0x81, 0x45, 0x22, 0xaf, 0xd0, 0x2f, 0xb1,
+ 0xa2, 0x11, 0xc9, 0x88, 0x9c, 0xc9, 0xd2, 0x1c, 0xf4, 0x00, 0xaa, 0xd7, 0x05, 0x5a, 0x74, 0x19,
+ 0x2a, 0x76, 0x34, 0x7a, 0x20, 0x5a, 0x55, 0xda, 0x2d, 0x5c, 0xb1, 0x2d, 0xb9, 0x48, 0x2b, 0xb9,
+ 0x8b, 0x54, 0x59, 0x45, 0x13, 0x47, 0xaf, 0x22, 0xfd, 0x1f, 0x69, 0x70, 0x21, 0xa2, 0x1a, 0xf5,
+ 0x8a, 0x4a, 0xac, 0x31, 0x44, 0xc1, 0xf1, 0x5a, 0xc3, 0x1d, 0x98, 0x64, 0x0a, 0xe7, 0x44, 0x91,
+ 0xd9, 0x91, 0x08, 0x69, 0x77, 0x30, 0x43, 0xa4, 0x7f, 0x5b, 0x83, 0xda, 0x0d, 0x7b, 0x93, 0xf8,
+ 0x2e, 0xd7, 0xe4, 0x9f, 0x4a, 0x07, 0x0b, 0xd7, 0xb2, 0x02, 0x85, 0xd1, 0x3e, 0xcc, 0x06, 0xe6,
+ 0x36, 0xb1, 0x86, 0x8e, 0x3c, 0x3e, 0x2e, 0x16, 0x12, 0xac, 0xd0, 0xee, 0x0a, 0x84, 0x4a, 0x38,
+ 0x54, 0x44, 0x02, 0xc7, 0xd4, 0xf4, 0xb7, 0xe1, 0x7c, 0x46, 0x23, 0x54, 0x67, 0x8c, 0xeb, 0x87,
+ 0x62, 0x78, 0x23, 0x4e, 0xf4, 0x43, 0xcc, 0xcb, 0xd1, 0xa3, 0x30, 0x41, 0xdc, 0x28, 0x30, 0x61,
+ 0x86, 0xca, 0xd8, 0x35, 0xd7, 0xc2, 0xb4, 0x8c, 0x2e, 0x50, 0xc7, 0x4b, 0x88, 0x22, 0xb6, 0x40,
+ 0x6f, 0x89, 0x32, 0x2c, 0xa1, 0xfa, 0xdf, 0x9c, 0x86, 0x27, 0x6e, 0x78, 0xbe, 0x7d, 0xdf, 0x73,
+ 0x43, 0xc3, 0xe9, 0x78, 0x56, 0x7c, 0x10, 0x22, 0x16, 0xfb, 0xcf, 0x6a, 0xf0, 0x88, 0x39, 0x18,
+ 0xb6, 0x5d, 0x3b, 0xb4, 0x8d, 0xc8, 0x3f, 0xdd, 0x21, 0xbe, 0xed, 0x15, 0x3d, 0x0f, 0x61, 0x21,
+ 0xa6, 0xcd, 0xce, 0x46, 0x16, 0x4a, 0x9c, 0x47, 0x0b, 0xbd, 0x09, 0x0b, 0x96, 0x77, 0xcf, 0xe5,
+ 0xde, 0x72, 0xe2, 0x18, 0xfb, 0x05, 0x4f, 0x41, 0x98, 0x80, 0x6b, 0x25, 0x30, 0xe1, 0x14, 0x66,
+ 0x76, 0x04, 0x24, 0x8b, 0xba, 0x21, 0x0b, 0xb1, 0xbd, 0x1f, 0x0f, 0x67, 0xc1, 0x23, 0xa0, 0x56,
+ 0x26, 0x46, 0x9c, 0x43, 0x09, 0x7d, 0x09, 0x2e, 0xda, 0x7c, 0x20, 0x30, 0x31, 0x2c, 0xdb, 0x25,
+ 0x41, 0xc0, 0xbf, 0xbb, 0xc4, 0x19, 0x47, 0x3b, 0x0b, 0x21, 0xce, 0xa6, 0x83, 0x7e, 0x12, 0x20,
+ 0xd8, 0x77, 0x4d, 0x31, 0xd7, 0x53, 0x85, 0xa8, 0x72, 0x0d, 0x4c, 0x62, 0xc1, 0x0a, 0x46, 0xaa,
+ 0x83, 0x84, 0x9e, 0x43, 0x7c, 0xc3, 0x35, 0xb9, 0x03, 0x58, 0xe3, 0x3a, 0xc8, 0xdd, 0xa8, 0x10,
+ 0xc7, 0x70, 0x64, 0xc1, 0xdc, 0x70, 0xa0, 0x4c, 0xfe, 0x4c, 0xf1, 0x73, 0x90, 0x0d, 0x05, 0x0f,
+ 0x4e, 0x60, 0xd5, 0xff, 0x89, 0x06, 0x33, 0x22, 0x42, 0x1e, 0x7d, 0x28, 0xa5, 0xf0, 0x4b, 0x67,
+ 0x58, 0x4a, 0xe9, 0xbf, 0xcf, 0xbc, 0x24, 0xc2, 0xa4, 0x12, 0x4c, 0x59, 0x4c, 0x5f, 0x14, 0x94,
+ 0x63, 0x03, 0x2d, 0xe1, 0x2d, 0x89, 0x8c, 0x36, 0x85, 0x9a, 0xfe, 0xab, 0x1a, 0x2c, 0x8d, 0xb4,
+ 0x1a, 0x63, 0x6b, 0x3d, 0x43, 0x4f, 0xf7, 0x37, 0x01, 0xd8, 0x75, 0x88, 0xd5, 0x4e, 0xbb, 0x4b,
+ 0xfc, 0x5d, 0x29, 0x57, 0x7e, 0x5e, 0x83, 0xc5, 0xf8, 0x4c, 0x46, 0xf4, 0x42, 0x2b, 0x11, 0x6b,
+ 0x72, 0x33, 0x85, 0xac, 0xb1, 0x2c, 0x3e, 0x7c, 0x31, 0x0d, 0xc1, 0x23, 0x84, 0xd1, 0xdf, 0xd0,
+ 0x60, 0xd1, 0x48, 0x5e, 0x87, 0x88, 0xf6, 0x81, 0x62, 0xb1, 0x7e, 0xa9, 0xbb, 0x15, 0x71, 0x67,
+ 0x52, 0x80, 0x00, 0x8f, 0xd0, 0x45, 0x1f, 0x83, 0x39, 0x63, 0x60, 0xaf, 0x0e, 0x2d, 0x9b, 0xea,
+ 0x04, 0x51, 0x0c, 0x3d, 0xe3, 0xdd, 0xd5, 0x4e, 0x5b, 0x96, 0xe3, 0x44, 0x2d, 0x79, 0xe1, 0x41,
+ 0x0c, 0xe5, 0x64, 0xd9, 0x0b, 0x0f, 0x62, 0x14, 0xe3, 0x0b, 0x0f, 0x62, 0xf0, 0x54, 0x2a, 0xe8,
+ 0x0b, 0xf0, 0x28, 0xdf, 0x42, 0x1b, 0x46, 0x60, 0x9b, 0xab, 0xc3, 0x70, 0x9b, 0xb8, 0x61, 0xa4,
+ 0x05, 0x73, 0xb3, 0x9a, 0x1d, 0x3b, 0xaf, 0xe5, 0x55, 0xc2, 0xf9, 0xed, 0x91, 0x07, 0xe0, 0xd9,
+ 0x96, 0x29, 0x3e, 0x88, 0x9f, 0x7c, 0x16, 0x0b, 0x0b, 0xb8, 0xd3, 0x6e, 0x35, 0xc5, 0xf7, 0x30,
+ 0x89, 0x14, 0xff, 0xc6, 0x0a, 0x09, 0xf4, 0x77, 0x35, 0x98, 0x17, 0x8c, 0x2e, 0x88, 0xce, 0x30,
+ 0x16, 0xf8, 0x42, 0x61, 0x86, 0x4c, 0x71, 0xfd, 0x0a, 0x56, 0xb1, 0xf3, 0xf0, 0x45, 0x19, 0x0d,
+ 0x93, 0x80, 0xe1, 0x64, 0x47, 0xd0, 0xd7, 0x35, 0xb8, 0x10, 0x10, 0x7f, 0xd7, 0x36, 0xc9, 0xaa,
+ 0x69, 0x7a, 0x43, 0x37, 0x9a, 0xe7, 0x6a, 0x09, 0xff, 0x51, 0x37, 0x03, 0x61, 0x63, 0xf9, 0xf0,
+ 0xa0, 0x7e, 0x21, 0x0b, 0x82, 0x33, 0x3b, 0x80, 0x7e, 0x5a, 0x83, 0x73, 0xf7, 0x8c, 0xd0, 0xdc,
+ 0x6e, 0x1a, 0xe6, 0x36, 0xb3, 0x31, 0x83, 0xe5, 0xd9, 0x12, 0x51, 0xb2, 0xaf, 0x27, 0x71, 0x71,
+ 0xff, 0x51, 0xaa, 0x10, 0xa7, 0x29, 0xa2, 0x10, 0xaa, 0x3e, 0x79, 0x6b, 0x48, 0x82, 0x30, 0x58,
+ 0x06, 0x46, 0xfd, 0x33, 0xe5, 0x27, 0x0d, 0x0b, 0x8c, 0x5c, 0x7d, 0x8a, 0x7e, 0x61, 0x49, 0xe9,
+ 0xf2, 0x2b, 0x80, 0x46, 0x67, 0x14, 0x2d, 0xc2, 0xc4, 0x0e, 0xe1, 0x57, 0x8e, 0x66, 0x31, 0xfd,
+ 0x17, 0x5d, 0x80, 0xa9, 0x5d, 0xc3, 0x19, 0x72, 0x4d, 0xb8, 0x8a, 0xf9, 0x8f, 0x17, 0x2a, 0x9f,
+ 0xd4, 0xf4, 0xef, 0x68, 0x70, 0x31, 0x93, 0x26, 0xc2, 0x70, 0xa9, 0x6f, 0xec, 0xdd, 0xf6, 0xdc,
+ 0xf5, 0x61, 0x68, 0x84, 0xb6, 0xdb, 0x6b, 0xbb, 0x5b, 0x8e, 0xdd, 0xdb, 0xe6, 0x1a, 0xe1, 0x14,
+ 0xd7, 0x29, 0xd6, 0x33, 0x6b, 0xe0, 0x9c, 0x96, 0xa8, 0x0d, 0xe7, 0xfb, 0xc6, 0xde, 0x08, 0xc2,
+ 0x0a, 0x43, 0xc8, 0x6e, 0xae, 0xad, 0x8f, 0x82, 0x71, 0x56, 0x1b, 0xfd, 0xeb, 0x93, 0xf0, 0x18,
+ 0xed, 0x78, 0xbc, 0xef, 0xac, 0x1b, 0xae, 0xd1, 0x7b, 0x97, 0xca, 0xf7, 0x6f, 0x6b, 0xf0, 0xc8,
+ 0x76, 0xb6, 0x9e, 0x2b, 0xb6, 0x3e, 0x5c, 0x4c, 0xdd, 0x3f, 0x4a, 0x77, 0xe6, 0xe1, 0x1f, 0x47,
+ 0x56, 0xc1, 0x79, 0xbd, 0x42, 0xaf, 0xc0, 0xa2, 0xeb, 0x59, 0xa4, 0xd9, 0x6e, 0xe1, 0x75, 0x23,
+ 0xd8, 0xe9, 0x46, 0x5e, 0x9d, 0x29, 0xee, 0xe0, 0xbc, 0x9d, 0x82, 0xe1, 0x91, 0xda, 0x68, 0x17,
+ 0xd0, 0xc0, 0xb3, 0xd6, 0x76, 0x6d, 0x33, 0x8a, 0x02, 0x28, 0xee, 0x54, 0x65, 0xc1, 0xf9, 0x9d,
+ 0x11, 0x6c, 0x38, 0x83, 0x82, 0xfe, 0x3d, 0x0d, 0xce, 0xd1, 0x29, 0xe9, 0xf8, 0xde, 0xde, 0xfe,
+ 0xbb, 0x92, 0x1b, 0x9e, 0x81, 0xc9, 0xbe, 0x67, 0x45, 0x76, 0xe9, 0x45, 0xaa, 0x1c, 0xad, 0x7b,
+ 0x16, 0x79, 0xc0, 0x9d, 0xac, 0x7b, 0xfb, 0xf4, 0x07, 0x66, 0x55, 0xf4, 0x3f, 0xd5, 0xb8, 0xfa,
+ 0x12, 0xd9, 0x65, 0xef, 0x4e, 0xf6, 0xfe, 0x04, 0xcc, 0xd3, 0xb2, 0x75, 0x63, 0xaf, 0xd3, 0x7a,
+ 0xcd, 0x73, 0xa2, 0xb8, 0x14, 0x16, 0x2d, 0x7e, 0x53, 0x05, 0xe0, 0x64, 0x3d, 0xfd, 0x5b, 0xf3,
+ 0xc0, 0x2a, 0x38, 0x24, 0x7c, 0x57, 0x7e, 0xd8, 0xb3, 0x50, 0x33, 0x07, 0xc3, 0xe6, 0xb5, 0xee,
+ 0xab, 0x43, 0x2f, 0x34, 0x84, 0x03, 0x98, 0xa9, 0x24, 0xcd, 0xce, 0x46, 0x54, 0x8c, 0xd5, 0x3a,
+ 0x74, 0xe1, 0x98, 0x83, 0xa1, 0x10, 0x46, 0x1d, 0x35, 0x7c, 0x81, 0x2d, 0x9c, 0x66, 0x67, 0x23,
+ 0x01, 0xc3, 0x23, 0xb5, 0xd1, 0x97, 0x35, 0x98, 0x23, 0x82, 0xa9, 0x6f, 0x18, 0xbe, 0x25, 0xd6,
+ 0x4c, 0xf1, 0x0d, 0x45, 0x8e, 0x6e, 0xb4, 0x54, 0xb8, 0x32, 0xb7, 0xa6, 0xd0, 0xc0, 0x09, 0x8a,
+ 0x4c, 0xaf, 0x12, 0xbf, 0xe9, 0x64, 0x79, 0xd6, 0x75, 0xdf, 0x30, 0x89, 0x62, 0x8a, 0x4d, 0x09,
+ 0xbd, 0x2a, 0xaf, 0x12, 0xce, 0x6f, 0x8f, 0xfe, 0xb1, 0x06, 0x97, 0x24, 0xd4, 0x76, 0xed, 0xfe,
+ 0xb0, 0x8f, 0x89, 0xe9, 0x18, 0x76, 0x5f, 0x28, 0x59, 0x9f, 0x3d, 0xbd, 0x2f, 0x4d, 0xe2, 0xe7,
+ 0x9b, 0x56, 0x36, 0x0c, 0xe7, 0xf4, 0x09, 0xfd, 0xaa, 0x06, 0x4f, 0x46, 0xa0, 0x0e, 0x35, 0x75,
+ 0x86, 0x3e, 0x89, 0xa3, 0x9b, 0xc4, 0x98, 0x14, 0xb3, 0x07, 0x3f, 0x78, 0x78, 0x50, 0x7f, 0x72,
+ 0xed, 0x18, 0xdc, 0xf8, 0x58, 0xea, 0x09, 0x8e, 0xe9, 0x7a, 0x5b, 0xa1, 0xd0, 0xca, 0x1e, 0x1a,
+ 0xc7, 0x50, 0x1a, 0x38, 0x41, 0x11, 0xfd, 0x86, 0x06, 0x8f, 0xa8, 0x05, 0x2a, 0xc3, 0x70, 0x75,
+ 0xec, 0x73, 0xa7, 0xd7, 0x9b, 0x14, 0x01, 0xee, 0xd2, 0xc9, 0x01, 0xe2, 0xbc, 0x6e, 0xa1, 0xa7,
+ 0x60, 0xa6, 0xcf, 0x98, 0x93, 0xab, 0x6c, 0x53, 0xdc, 0x37, 0xc7, 0xf9, 0x35, 0xc0, 0x11, 0x8c,
+ 0x9a, 0x43, 0x03, 0xcf, 0xea, 0xd8, 0x56, 0x70, 0xcb, 0xee, 0xdb, 0xe1, 0x72, 0x8d, 0x5d, 0x2f,
+ 0x61, 0xe3, 0xd1, 0xf1, 0xac, 0x4e, 0xbb, 0xc5, 0xcb, 0x71, 0xa2, 0x16, 0x8b, 0xa0, 0xb5, 0xfb,
+ 0x46, 0x8f, 0x74, 0x86, 0x8e, 0xd3, 0xf1, 0x3d, 0x66, 0x24, 0xb7, 0x88, 0x61, 0x39, 0xb6, 0x4b,
+ 0x96, 0xe7, 0x8a, 0x47, 0xd0, 0xb6, 0xf3, 0x90, 0xe2, 0x7c, 0x7a, 0x68, 0x05, 0x60, 0xcb, 0xb0,
+ 0x9d, 0xee, 0x3d, 0x63, 0x70, 0xc7, 0x5d, 0x9e, 0x67, 0x62, 0x8c, 0x59, 0x22, 0xd7, 0x64, 0x29,
+ 0x56, 0x6a, 0x30, 0x86, 0xa2, 0xc2, 0x10, 0x13, 0x7e, 0x4f, 0x66, 0x79, 0xe1, 0xb4, 0x18, 0x2a,
+ 0xc2, 0xc8, 0x07, 0xf0, 0xa6, 0x42, 0x03, 0x27, 0x28, 0xa2, 0xaf, 0x6a, 0xb0, 0x10, 0xec, 0x07,
+ 0x21, 0xe9, 0xcb, 0x4e, 0x9c, 0x3b, 0xf5, 0x4e, 0x30, 0x0f, 0x42, 0x37, 0x41, 0x05, 0xa7, 0xa8,
+ 0xea, 0x07, 0x15, 0xae, 0x22, 0x8f, 0x70, 0x21, 0x7a, 0x09, 0xce, 0xf5, 0x49, 0xdf, 0xf3, 0xf7,
+ 0x57, 0xa3, 0x0b, 0xe4, 0xc2, 0xe7, 0xc1, 0x6c, 0x86, 0xf5, 0x24, 0x08, 0xa7, 0xeb, 0xd2, 0x9d,
+ 0x82, 0xcd, 0xd8, 0xb5, 0x6e, 0xdc, 0xbe, 0x12, 0xef, 0x14, 0xed, 0x14, 0x0c, 0x8f, 0xd4, 0x46,
+ 0x4d, 0x58, 0x12, 0x65, 0x6d, 0xaa, 0x7e, 0x05, 0xd7, 0x7c, 0x12, 0x1d, 0x47, 0x50, 0xad, 0x62,
+ 0xa9, 0x9d, 0x06, 0xe2, 0xd1, 0xfa, 0xf4, 0x2b, 0xe8, 0x0f, 0xb5, 0x17, 0x93, 0xf1, 0x57, 0xdc,
+ 0x4e, 0x82, 0x70, 0xba, 0x6e, 0xa4, 0x28, 0x26, 0xba, 0x30, 0x15, 0x7f, 0xc5, 0xed, 0x14, 0x0c,
+ 0x8f, 0xd4, 0xd6, 0xff, 0x74, 0x12, 0x3e, 0x30, 0x86, 0xf0, 0x46, 0xfd, 0xec, 0xe1, 0x3e, 0x66,
+ 0x1d, 0xad, 0x44, 0xe7, 0x17, 0x2b, 0xaf, 0x0e, 0x0d, 0x37, 0xb4, 0xc3, 0xfd, 0x31, 0xa7, 0x67,
+ 0x90, 0x33, 0x3d, 0x27, 0xa7, 0x37, 0xee, 0x74, 0x06, 0x79, 0xd3, 0x79, 0x72, 0x92, 0xe3, 0x4f,
+ 0x7f, 0x3f, 0x7b, 0xfa, 0x0b, 0x8e, 0xea, 0xb1, 0xec, 0x32, 0xc8, 0x61, 0x97, 0x82, 0xa3, 0x3a,
+ 0x06, 0x7b, 0xfd, 0x87, 0x49, 0xf8, 0xe0, 0x38, 0xbb, 0x48, 0x41, 0xfe, 0xca, 0x90, 0xd3, 0x0f,
+ 0x95, 0xbf, 0xf2, 0x42, 0x4e, 0x1e, 0x22, 0x7f, 0x65, 0x90, 0x7c, 0xd8, 0xfc, 0x95, 0x37, 0xaa,
+ 0x0f, 0x8b, 0xbf, 0xf2, 0x46, 0x75, 0x0c, 0xfe, 0xfa, 0x3f, 0xe9, 0xfd, 0x41, 0x6e, 0x61, 0x6d,
+ 0x98, 0x30, 0x07, 0xc3, 0x82, 0x42, 0x8a, 0x1d, 0xa9, 0x35, 0x3b, 0x1b, 0x98, 0xe2, 0x40, 0x18,
+ 0xa6, 0x39, 0xff, 0x14, 0x14, 0x41, 0x2c, 0x5c, 0x88, 0xb3, 0x24, 0x16, 0x98, 0xe8, 0x50, 0x91,
+ 0xc1, 0x36, 0xe9, 0x13, 0xdf, 0x70, 0xba, 0xa1, 0xe7, 0x1b, 0xbd, 0xa2, 0xd2, 0x86, 0x0d, 0xd5,
+ 0x5a, 0x0a, 0x17, 0x1e, 0xc1, 0x4e, 0x07, 0x64, 0x60, 0x5b, 0x05, 0xe5, 0x0b, 0x1b, 0x90, 0x4e,
+ 0xbb, 0x85, 0x29, 0x0e, 0xfd, 0x7b, 0x55, 0x50, 0xee, 0x57, 0x50, 0x83, 0xc5, 0x70, 0x1c, 0xef,
+ 0x5e, 0xc7, 0xb7, 0x77, 0x6d, 0x87, 0xf4, 0x88, 0x25, 0x03, 0xf0, 0x03, 0x71, 0xf2, 0xca, 0xb4,
+ 0xa7, 0xd5, 0xbc, 0x4a, 0x38, 0xbf, 0x3d, 0xd5, 0xe5, 0x96, 0xcc, 0xf4, 0x45, 0xad, 0x52, 0x47,
+ 0x2d, 0x23, 0xd7, 0xbe, 0xf8, 0x82, 0x1a, 0x29, 0xc6, 0xa3, 0x74, 0xd1, 0x5f, 0xd5, 0xb8, 0xb5,
+ 0x2d, 0x5d, 0x76, 0x62, 0xd2, 0x6e, 0x9c, 0x96, 0x97, 0x38, 0xb6, 0xdb, 0x63, 0xaf, 0x60, 0x92,
+ 0x22, 0xfa, 0x96, 0x06, 0x17, 0x77, 0xb2, 0xbc, 0x6f, 0x62, 0x6e, 0x3b, 0x85, 0xfb, 0x92, 0xe3,
+ 0xcf, 0xe3, 0xe7, 0x87, 0x99, 0x15, 0x70, 0x76, 0x4f, 0xe4, 0x38, 0x49, 0xd7, 0x89, 0x90, 0x03,
+ 0xc5, 0xc7, 0x29, 0xe5, 0x84, 0x89, 0xc7, 0x49, 0x02, 0x70, 0x92, 0x22, 0x7a, 0x0b, 0x66, 0x77,
+ 0x22, 0x57, 0x94, 0x30, 0x6e, 0x5b, 0x85, 0xc9, 0x2b, 0x0e, 0x2d, 0x7e, 0x52, 0x29, 0x0b, 0x71,
+ 0x4c, 0x05, 0xd9, 0x30, 0xb3, 0xc3, 0xa5, 0x91, 0x30, 0x4a, 0x1b, 0xe5, 0xf5, 0x65, 0x6e, 0x19,
+ 0x89, 0x22, 0x1c, 0xe1, 0x57, 0xa3, 0x38, 0xaa, 0xc7, 0xc4, 0x42, 0x7d, 0x43, 0x83, 0x8b, 0xbb,
+ 0xc4, 0x0f, 0x6d, 0x33, 0xed, 0xfe, 0x9c, 0x2d, 0xa1, 0xd4, 0xbf, 0x96, 0x85, 0x91, 0xb3, 0x4a,
+ 0x26, 0x08, 0x67, 0xf7, 0x41, 0xff, 0x2f, 0x1a, 0x8c, 0x78, 0x83, 0xd0, 0x2f, 0x6a, 0x30, 0xb7,
+ 0x45, 0x8c, 0x70, 0xe8, 0x93, 0xeb, 0x46, 0x28, 0xa3, 0x74, 0x5f, 0x3f, 0x15, 0x2f, 0xd4, 0xca,
+ 0x35, 0x05, 0x33, 0x3f, 0x88, 0x91, 0x17, 0x83, 0x55, 0x10, 0x4e, 0x74, 0xe1, 0xf2, 0xa7, 0x61,
+ 0x69, 0xa4, 0xe1, 0x89, 0xfc, 0xfd, 0xbf, 0x23, 0xfc, 0x89, 0xe9, 0x44, 0x70, 0x6f, 0xc0, 0x94,
+ 0x61, 0x59, 0x32, 0xd9, 0xcc, 0x0b, 0x05, 0x0f, 0x1d, 0x2d, 0x35, 0x1c, 0x9a, 0xfd, 0xc4, 0x1c,
+ 0x2f, 0xba, 0x06, 0xc8, 0x48, 0x1c, 0xaf, 0xad, 0x7b, 0x56, 0x64, 0x2e, 0x31, 0xef, 0xee, 0xea,
+ 0x08, 0x14, 0x67, 0xb4, 0xd0, 0x3f, 0x05, 0x0b, 0xc9, 0xeb, 0x76, 0x27, 0x88, 0xc8, 0xd3, 0xff,
+ 0xba, 0x06, 0x68, 0xf4, 0x2a, 0x3a, 0x0a, 0xa0, 0x2a, 0x6a, 0x44, 0x93, 0x5c, 0xcc, 0xd5, 0x98,
+ 0x0e, 0x16, 0x8c, 0x23, 0xd2, 0x45, 0x41, 0x80, 0x25, 0x21, 0xfd, 0xcf, 0x34, 0x88, 0x93, 0x5a,
+ 0xa0, 0x8f, 0x43, 0xcd, 0x22, 0x81, 0xe9, 0xdb, 0x83, 0x30, 0xfe, 0x10, 0x79, 0x85, 0xb8, 0x15,
+ 0x83, 0xb0, 0x5a, 0x0f, 0xe9, 0x30, 0x1d, 0x1a, 0xc1, 0x4e, 0xbb, 0x25, 0x0c, 0x47, 0xb6, 0xcd,
+ 0xdf, 0x65, 0x25, 0x58, 0x40, 0xe2, 0xbb, 0x6b, 0x13, 0x63, 0xdc, 0x5d, 0x43, 0x5b, 0xa7, 0x70,
+ 0x51, 0x0f, 0x1d, 0x7f, 0x49, 0x4f, 0xff, 0xb7, 0x15, 0x48, 0x26, 0x12, 0x29, 0x3a, 0x04, 0xa3,
+ 0x37, 0x0b, 0x2b, 0x0f, 0xed, 0x66, 0xe1, 0x47, 0x58, 0xd2, 0x2d, 0x9e, 0x2a, 0x91, 0x1f, 0x86,
+ 0xa8, 0x99, 0xb2, 0x78, 0x9e, 0x43, 0x59, 0x03, 0x3d, 0xaf, 0xc6, 0xfe, 0xcd, 0x36, 0x3e, 0x10,
+ 0xad, 0x0b, 0x16, 0xd0, 0xf7, 0x40, 0xdc, 0x9d, 0x94, 0xdf, 0x9f, 0x08, 0xf3, 0xfb, 0xb8, 0x88,
+ 0x5c, 0x9b, 0x4a, 0xdc, 0xef, 0x8c, 0x2e, 0x85, 0x2e, 0x25, 0x1a, 0xc6, 0xe1, 0x6c, 0xfa, 0xd7,
+ 0x34, 0x98, 0x11, 0x69, 0x05, 0xc6, 0x08, 0xd9, 0xec, 0xc1, 0x14, 0x53, 0xd7, 0x4b, 0x69, 0x32,
+ 0xdd, 0x6d, 0xcf, 0x0b, 0x13, 0xe9, 0x15, 0x58, 0xa8, 0x18, 0xfb, 0x17, 0x73, 0xfc, 0xfa, 0x37,
+ 0x26, 0xe1, 0x49, 0x51, 0x65, 0x64, 0x9b, 0x96, 0x8b, 0x70, 0x1f, 0xce, 0x8b, 0x69, 0x6a, 0xf9,
+ 0x86, 0x2d, 0xcf, 0x8b, 0x8a, 0x59, 0x60, 0xe2, 0x2c, 0x71, 0x04, 0x1d, 0xce, 0xa2, 0x81, 0x7e,
+ 0x0a, 0x2e, 0x88, 0xe2, 0x1b, 0xc4, 0x70, 0xc2, 0xed, 0x88, 0x76, 0x31, 0x6b, 0x8c, 0x1d, 0x60,
+ 0xaf, 0x67, 0xe0, 0xc3, 0x99, 0x54, 0x58, 0xb4, 0x97, 0x00, 0x34, 0x7d, 0x62, 0xa8, 0x87, 0x65,
+ 0x25, 0xa2, 0xbd, 0xd6, 0x33, 0x31, 0xe2, 0x1c, 0x4a, 0xcc, 0x95, 0x65, 0xec, 0x31, 0xcb, 0x18,
+ 0x93, 0xd0, 0xb7, 0x59, 0xc2, 0x0b, 0xca, 0xe0, 0xdc, 0x96, 0x4d, 0x82, 0x70, 0xba, 0x2e, 0x7a,
+ 0x01, 0x16, 0xd8, 0xf9, 0x5f, 0x7c, 0x5d, 0x6b, 0x2a, 0xce, 0x9c, 0x79, 0x3b, 0x01, 0xc1, 0xa9,
+ 0x9a, 0xfa, 0xaf, 0x6b, 0x30, 0xa7, 0x32, 0xd0, 0x18, 0x81, 0x9d, 0xbb, 0x8a, 0xc0, 0x2e, 0x13,
+ 0x2d, 0xa9, 0x92, 0x1d, 0x53, 0x66, 0x9f, 0xcf, 0x68, 0xc3, 0x0e, 0xad, 0x48, 0x4a, 0xf8, 0x97,
+ 0x3a, 0xb4, 0x1a, 0xd9, 0x49, 0xe4, 0xa1, 0x55, 0x1a, 0x82, 0x47, 0x08, 0xa3, 0xd7, 0x61, 0xc2,
+ 0xf4, 0x6d, 0x31, 0x30, 0x9f, 0x2c, 0x66, 0x9f, 0xe0, 0x76, 0x1c, 0x3b, 0xdf, 0xc4, 0x6d, 0x4c,
+ 0x31, 0xea, 0xbf, 0x3d, 0x01, 0x35, 0x25, 0x6b, 0x09, 0x5a, 0x2f, 0x63, 0xdf, 0xc6, 0xe8, 0x23,
+ 0x1b, 0x77, 0x1d, 0x26, 0x7a, 0x83, 0x61, 0x41, 0x03, 0x57, 0xa2, 0xbb, 0x4e, 0xd1, 0xf5, 0x06,
+ 0x43, 0xf4, 0x9a, 0x34, 0x99, 0x8b, 0x19, 0xb5, 0x32, 0xe0, 0x2e, 0x65, 0x36, 0x47, 0xec, 0x39,
+ 0x99, 0xcb, 0x9e, 0x2e, 0xcc, 0x04, 0xc2, 0x9e, 0x9e, 0x2a, 0x91, 0x34, 0x47, 0x19, 0x6a, 0x61,
+ 0x40, 0x73, 0x3d, 0x3c, 0xb2, 0xaf, 0x23, 0x22, 0x54, 0x0b, 0x18, 0xb2, 0x88, 0x77, 0x66, 0x62,
+ 0x54, 0xb9, 0x16, 0xb0, 0xc1, 0x4a, 0xb0, 0x80, 0xe8, 0xff, 0x4c, 0x03, 0x34, 0x8a, 0x10, 0x7d,
+ 0x00, 0xa6, 0x58, 0xe0, 0xbf, 0x58, 0x6c, 0x4a, 0xa6, 0x04, 0x23, 0x08, 0x30, 0x87, 0xa1, 0xd7,
+ 0xc5, 0xad, 0x8e, 0x62, 0x33, 0x23, 0xb7, 0x70, 0x41, 0x53, 0xb9, 0x06, 0x12, 0xed, 0x51, 0x13,
+ 0x79, 0x7b, 0x94, 0xfe, 0xc3, 0x0a, 0x65, 0x39, 0xdb, 0x0d, 0x89, 0xcb, 0xe2, 0x30, 0xef, 0x03,
+ 0x18, 0xc3, 0xd0, 0xe3, 0x7b, 0xb2, 0xe0, 0xbc, 0xcf, 0x14, 0x1c, 0x5d, 0x89, 0x75, 0x55, 0x62,
+ 0xe4, 0x87, 0x22, 0xf1, 0x6f, 0xac, 0x50, 0xa3, 0xb4, 0x43, 0xbb, 0x4f, 0x5e, 0xb7, 0x5d, 0xcb,
+ 0xbb, 0x27, 0x06, 0xa3, 0x34, 0xed, 0xbb, 0x12, 0x23, 0xa7, 0x1d, 0xff, 0xc6, 0x0a, 0x35, 0xf4,
+ 0x59, 0x58, 0x66, 0xf9, 0x77, 0x5d, 0x96, 0xc3, 0x49, 0x74, 0xce, 0x73, 0x9c, 0x68, 0x97, 0xa8,
+ 0x36, 0x1e, 0x3f, 0x3c, 0xa8, 0x2f, 0x37, 0x73, 0xea, 0xe0, 0xdc, 0xd6, 0xfa, 0xb7, 0x35, 0xb8,
+ 0x98, 0x39, 0x16, 0xe8, 0x3a, 0x2c, 0xc5, 0x07, 0xe2, 0xaa, 0x54, 0xab, 0xc6, 0x49, 0xc9, 0x6e,
+ 0xa6, 0x2b, 0xe0, 0xd1, 0x36, 0x68, 0x5d, 0x6e, 0xed, 0xaa, 0xd4, 0x14, 0xa7, 0xe9, 0x8f, 0x09,
+ 0x54, 0x59, 0x82, 0x15, 0x67, 0xb5, 0xd3, 0xbf, 0x90, 0xe8, 0x70, 0x3c, 0x60, 0x94, 0x99, 0x37,
+ 0x49, 0x4f, 0x46, 0xcc, 0x4a, 0x66, 0x6e, 0xd0, 0x42, 0xcc, 0x61, 0xe8, 0x09, 0x35, 0x6e, 0x5d,
+ 0x4a, 0x8d, 0x28, 0x76, 0x5d, 0x1f, 0x02, 0xac, 0x7b, 0xae, 0x1d, 0x7a, 0xbe, 0xed, 0xf6, 0x50,
+ 0x0f, 0xaa, 0x86, 0xc8, 0x0e, 0x2d, 0x98, 0xed, 0xa5, 0x62, 0x96, 0x91, 0x40, 0xc2, 0x23, 0xb9,
+ 0xa2, 0x5f, 0x58, 0x22, 0xd7, 0xff, 0xa1, 0x06, 0x97, 0xb2, 0xef, 0x64, 0x8c, 0xb1, 0x1d, 0xf6,
+ 0xa1, 0xe6, 0xc7, 0xcd, 0x04, 0x67, 0xfe, 0x84, 0x7a, 0x3b, 0x5b, 0xc9, 0x16, 0x4f, 0x55, 0x85,
+ 0xa6, 0xef, 0x05, 0xd1, 0xec, 0xa4, 0x2f, 0x6c, 0xcb, 0xe5, 0xaa, 0xf4, 0x04, 0xab, 0xf8, 0xf5,
+ 0xaf, 0x54, 0x00, 0x6e, 0x93, 0xf0, 0x9e, 0xe7, 0xef, 0xd0, 0x31, 0x7a, 0x57, 0xdd, 0x0d, 0x7a,
+ 0x1c, 0x26, 0x07, 0x9e, 0x15, 0x08, 0x89, 0xc2, 0x2e, 0xed, 0xb0, 0xd3, 0x5c, 0x56, 0x8a, 0xea,
+ 0x30, 0xc5, 0xfc, 0xb6, 0x42, 0x76, 0x33, 0x4d, 0x95, 0x6a, 0x27, 0x01, 0xe6, 0xe5, 0xe8, 0x69,
+ 0xa8, 0x8a, 0x20, 0xc3, 0x40, 0xe8, 0xde, 0x6c, 0xc2, 0x44, 0x38, 0x62, 0x80, 0x25, 0x54, 0xff,
+ 0x99, 0x49, 0x48, 0xe4, 0x37, 0x8f, 0x2d, 0xe8, 0xc9, 0x87, 0x64, 0x41, 0x7f, 0x16, 0x96, 0x1d,
+ 0xcf, 0xb0, 0x1a, 0x86, 0x43, 0xd9, 0xde, 0xef, 0xf2, 0xf9, 0x30, 0xdc, 0x1e, 0x89, 0xf2, 0x94,
+ 0x33, 0x11, 0x70, 0x2b, 0xa7, 0x0e, 0xce, 0x6d, 0x8d, 0x86, 0x4a, 0x5a, 0x75, 0xaa, 0x33, 0xac,
+ 0x97, 0xce, 0xff, 0xbe, 0xa2, 0x46, 0x98, 0xca, 0x8d, 0x34, 0x99, 0x7a, 0x1d, 0x7d, 0x55, 0x83,
+ 0x8b, 0x64, 0x2f, 0x24, 0xbe, 0x6b, 0x38, 0x77, 0x7d, 0x63, 0x6b, 0xcb, 0x36, 0x13, 0xf1, 0x32,
+ 0x9d, 0xc3, 0x83, 0xfa, 0xc5, 0xb5, 0xac, 0x0a, 0x0f, 0x0e, 0xea, 0xcf, 0x8d, 0x3e, 0x70, 0x10,
+ 0xc5, 0x93, 0x66, 0x36, 0x61, 0xec, 0x98, 0x4d, 0xee, 0xf2, 0xf3, 0x50, 0x3b, 0x41, 0xfc, 0xe4,
+ 0xac, 0xea, 0x4f, 0xf9, 0xe5, 0x69, 0x50, 0xa2, 0x79, 0x4f, 0x90, 0x5b, 0xef, 0xef, 0x6b, 0x70,
+ 0xc1, 0x74, 0x6c, 0xe2, 0x86, 0xa9, 0xa8, 0x65, 0xbe, 0x34, 0x5e, 0x2b, 0x16, 0x67, 0x3c, 0x20,
+ 0x6e, 0xbb, 0xd5, 0xf4, 0x5c, 0x97, 0x98, 0x61, 0x33, 0x03, 0x3b, 0x37, 0x4e, 0xb2, 0x20, 0x38,
+ 0xb3, 0x37, 0xec, 0x83, 0x58, 0x79, 0xbb, 0xa5, 0x5e, 0xe5, 0x69, 0x8a, 0x32, 0x2c, 0xa1, 0xe8,
+ 0x59, 0xa8, 0xf5, 0x7c, 0x6f, 0x38, 0x08, 0x9a, 0x2c, 0x92, 0x87, 0xaf, 0x30, 0x16, 0x2a, 0x75,
+ 0x3d, 0x2e, 0xc6, 0x6a, 0x1d, 0xf4, 0x31, 0x98, 0xe3, 0x3f, 0x3b, 0x3e, 0xd9, 0xb2, 0xf7, 0xc4,
+ 0x8a, 0x63, 0x81, 0x01, 0xd7, 0x95, 0x72, 0x9c, 0xa8, 0x85, 0x3e, 0x0c, 0xb3, 0x76, 0x10, 0x0c,
+ 0x89, 0xbf, 0x81, 0x6f, 0x89, 0xc4, 0x3d, 0xcc, 0x1b, 0xda, 0x8e, 0x0a, 0x71, 0x0c, 0x47, 0xbf,
+ 0xa4, 0xc1, 0x82, 0x4f, 0xde, 0x1a, 0xda, 0x3e, 0xb1, 0x18, 0xd1, 0x40, 0xc4, 0x54, 0x77, 0x4b,
+ 0x06, 0x72, 0xaf, 0xe0, 0x04, 0x56, 0xce, 0xe9, 0xd2, 0x8f, 0x90, 0x04, 0xe2, 0x54, 0x17, 0xe8,
+ 0x58, 0x05, 0x76, 0xcf, 0xb5, 0xdd, 0xde, 0xaa, 0xd3, 0x0b, 0x96, 0xab, 0x6c, 0xf5, 0xb2, 0xb1,
+ 0xea, 0xc6, 0xc5, 0x58, 0xad, 0x83, 0x3e, 0x01, 0xf3, 0xc3, 0x80, 0xf2, 0x6e, 0x9f, 0xf0, 0x01,
+ 0x9e, 0x8d, 0x43, 0xec, 0x36, 0x54, 0x00, 0x4e, 0xd6, 0xa3, 0xa6, 0x59, 0x54, 0x20, 0x86, 0x19,
+ 0xf8, 0xe5, 0x5c, 0xda, 0xcf, 0x8d, 0x04, 0x04, 0xa7, 0x6a, 0x5e, 0x5e, 0x85, 0xf3, 0x19, 0x9f,
+ 0x79, 0xa2, 0x05, 0xf2, 0xeb, 0x15, 0x78, 0xff, 0xb1, 0x6c, 0x89, 0x7e, 0x59, 0x83, 0x1a, 0xd9,
+ 0x0b, 0x7d, 0x43, 0x06, 0xfc, 0xd1, 0x39, 0xea, 0x3d, 0x9c, 0x45, 0xb0, 0xb2, 0x16, 0x53, 0xe2,
+ 0xf3, 0x26, 0xf7, 0x3c, 0x05, 0x82, 0xd5, 0x0e, 0x51, 0x15, 0x9b, 0x5f, 0xde, 0x56, 0x1d, 0x6d,
+ 0x22, 0x21, 0xb4, 0x80, 0x5c, 0x7e, 0x19, 0x16, 0xd3, 0x98, 0x4f, 0x34, 0x54, 0xbf, 0x55, 0x81,
+ 0xa9, 0x8e, 0x63, 0xb8, 0x67, 0xf1, 0xd4, 0xc0, 0x17, 0x13, 0xf9, 0x3f, 0x8a, 0x65, 0x55, 0x61,
+ 0x7d, 0xcd, 0xcd, 0x0f, 0xb4, 0x9d, 0xca, 0x0f, 0xf4, 0x4a, 0x09, 0x1a, 0x47, 0xa7, 0x03, 0xfa,
+ 0x9e, 0x06, 0xb3, 0xac, 0xde, 0x19, 0xe4, 0x11, 0x79, 0x23, 0x99, 0x47, 0xe4, 0x85, 0xe2, 0x1f,
+ 0x95, 0x93, 0x36, 0xe4, 0x8f, 0xa3, 0x8f, 0x61, 0x59, 0x42, 0x3e, 0xa7, 0x66, 0xc0, 0xe7, 0x5f,
+ 0xf3, 0x74, 0x56, 0xd6, 0x9d, 0x5b, 0x9e, 0x69, 0x38, 0x69, 0x4d, 0xee, 0xe8, 0x34, 0xf8, 0x2e,
+ 0xcc, 0x12, 0x71, 0xab, 0x3e, 0xfa, 0x9a, 0x62, 0xba, 0x6d, 0x74, 0x37, 0x3f, 0xa6, 0x17, 0x95,
+ 0x04, 0x38, 0x26, 0xa1, 0xff, 0x4e, 0x05, 0x6a, 0xca, 0x6c, 0xbe, 0x23, 0xd9, 0x7d, 0xae, 0x65,
+ 0xe6, 0x8b, 0xae, 0xb0, 0x80, 0xbe, 0x4b, 0x27, 0x48, 0x36, 0x1f, 0x40, 0xcd, 0x8c, 0xd3, 0x15,
+ 0x96, 0x62, 0x70, 0x25, 0xed, 0xa1, 0x08, 0x2c, 0x8e, 0x0b, 0xb0, 0x4a, 0x45, 0xff, 0x17, 0x15,
+ 0x98, 0xe9, 0xf8, 0x1e, 0x9d, 0xe3, 0x33, 0x10, 0x10, 0x9b, 0x09, 0x01, 0x51, 0x70, 0xf1, 0xf2,
+ 0xde, 0xe6, 0x8a, 0x88, 0x37, 0x53, 0x22, 0xa2, 0x51, 0x8a, 0xca, 0xd1, 0x42, 0xe2, 0x0f, 0x35,
+ 0xa8, 0x89, 0x9a, 0x67, 0x20, 0x26, 0x8c, 0xa4, 0x98, 0x78, 0xb1, 0xcc, 0x87, 0xe5, 0x08, 0x8a,
+ 0xbf, 0xa7, 0xc1, 0xbc, 0xa8, 0xb1, 0x4e, 0xfa, 0x9b, 0xc4, 0x47, 0xd7, 0x60, 0x26, 0x18, 0xb2,
+ 0xb9, 0x14, 0x5f, 0xf4, 0x98, 0x2a, 0x2a, 0xfc, 0x4d, 0xc3, 0x64, 0xef, 0x1e, 0xf0, 0x2a, 0x4a,
+ 0x62, 0x2e, 0x5e, 0x80, 0xa3, 0xc6, 0xd4, 0xa0, 0xf3, 0x3d, 0x67, 0xe4, 0xda, 0x3c, 0xf6, 0x1c,
+ 0x82, 0x19, 0x84, 0xda, 0x51, 0xf4, 0x6f, 0x74, 0xa4, 0xc4, 0xec, 0x28, 0x0a, 0x0e, 0x30, 0x2f,
+ 0xd7, 0x7f, 0x6e, 0x52, 0x8e, 0x36, 0x93, 0x63, 0x37, 0x60, 0xd6, 0xf4, 0x89, 0x11, 0x12, 0xab,
+ 0xb1, 0x3f, 0x4e, 0xe7, 0x98, 0x42, 0xd7, 0x8c, 0x5a, 0xe0, 0xb8, 0x31, 0x55, 0x9d, 0xd4, 0x53,
+ 0xa2, 0x4a, 0xac, 0x66, 0xe6, 0x9e, 0x10, 0xbd, 0x08, 0x53, 0xde, 0x3d, 0x57, 0xc6, 0x49, 0x1c,
+ 0x49, 0x98, 0x7d, 0xca, 0x1d, 0x5a, 0x1b, 0xf3, 0x46, 0x6a, 0xa2, 0x87, 0xc9, 0x23, 0x12, 0x3d,
+ 0xf4, 0x61, 0xa6, 0xcf, 0xa6, 0x21, 0x4a, 0xcb, 0x50, 0x8a, 0x99, 0xf9, 0x8c, 0xaa, 0x09, 0x29,
+ 0x19, 0x6a, 0x1c, 0xd1, 0xa0, 0x4a, 0x30, 0xd5, 0xd3, 0x82, 0x81, 0x61, 0x12, 0x55, 0x09, 0xbe,
+ 0x1d, 0x15, 0xe2, 0x18, 0x8e, 0xee, 0x43, 0x8d, 0xdf, 0x64, 0xe6, 0xb2, 0x76, 0xa6, 0x84, 0x4f,
+ 0x52, 0xf4, 0xef, 0x6e, 0x8c, 0x8e, 0x0f, 0xbe, 0x52, 0x80, 0x55, 0x62, 0xfa, 0x2f, 0x4c, 0x48,
+ 0x36, 0x15, 0x82, 0x3f, 0x3b, 0x69, 0xbf, 0x56, 0xe8, 0xd5, 0x8f, 0xe7, 0x60, 0x6a, 0xb0, 0x6d,
+ 0x04, 0x11, 0xaf, 0x46, 0xd9, 0x53, 0xa7, 0x3a, 0xb4, 0xf0, 0xc1, 0x41, 0x7d, 0x4e, 0x90, 0x66,
+ 0xbf, 0x31, 0xaf, 0x8b, 0x86, 0x70, 0x3e, 0x08, 0x0d, 0x87, 0x74, 0x6d, 0xe1, 0x3e, 0x0a, 0x42,
+ 0xa3, 0x3f, 0x28, 0x90, 0x04, 0x95, 0x9d, 0x32, 0x75, 0x47, 0x51, 0xe1, 0x2c, 0xfc, 0xe8, 0x67,
+ 0x34, 0x58, 0x66, 0xe5, 0xab, 0xc3, 0xd0, 0xe3, 0x99, 0x8d, 0x63, 0xe2, 0x27, 0x3f, 0x64, 0x65,
+ 0xc6, 0x7e, 0x37, 0x07, 0x1f, 0xce, 0xa5, 0xa4, 0xff, 0x5f, 0x0d, 0xd0, 0xe8, 0x2c, 0xa2, 0x3e,
+ 0x54, 0x2d, 0xb2, 0x65, 0x0c, 0x9d, 0x30, 0xda, 0x8c, 0x8b, 0x5d, 0x75, 0x8d, 0x71, 0xc6, 0x02,
+ 0xb2, 0x25, 0x10, 0x63, 0x49, 0x02, 0x0d, 0x60, 0xf6, 0xde, 0xb6, 0x1d, 0x12, 0xc7, 0x0e, 0x42,
+ 0x21, 0x24, 0x4b, 0xd3, 0x93, 0xfa, 0xc7, 0xeb, 0x11, 0x66, 0x1c, 0x13, 0xd1, 0xff, 0xda, 0x04,
+ 0x54, 0x4f, 0xf0, 0x38, 0xd3, 0x10, 0x90, 0xb8, 0xe8, 0x4e, 0x95, 0x16, 0x52, 0xc6, 0x6f, 0xc5,
+ 0x34, 0x8b, 0xe6, 0x08, 0x32, 0x9c, 0x41, 0x00, 0xbd, 0x0d, 0x17, 0x6c, 0x77, 0xcb, 0x37, 0x82,
+ 0xd0, 0x1f, 0x9a, 0xe1, 0xd0, 0x8f, 0x08, 0x17, 0x4a, 0x50, 0xc2, 0x8c, 0xfd, 0x76, 0x06, 0x3a,
+ 0x9c, 0x49, 0x04, 0x6d, 0xc1, 0xcc, 0x3d, 0xcf, 0xdf, 0xa1, 0x32, 0x6c, 0xb2, 0x44, 0x2e, 0xf8,
+ 0xd7, 0x19, 0x8e, 0x58, 0x78, 0xf1, 0xdf, 0x01, 0x8e, 0x90, 0xeb, 0x7f, 0xa0, 0xc1, 0x14, 0xbf,
+ 0x2c, 0xf5, 0xde, 0x30, 0x74, 0x58, 0x5f, 0x73, 0xf3, 0x1a, 0x52, 0xf3, 0x83, 0xd5, 0x78, 0xaf,
+ 0x98, 0x1f, 0xac, 0xb3, 0x39, 0x5a, 0xc5, 0x1f, 0x4c, 0x88, 0x8f, 0x61, 0xdb, 0x76, 0x1b, 0xce,
+ 0x0b, 0x0d, 0xf4, 0x96, 0xbd, 0x45, 0x28, 0x83, 0xb5, 0x8c, 0xfd, 0x40, 0x5c, 0x00, 0x66, 0xd2,
+ 0xaf, 0x39, 0x0a, 0xc6, 0x59, 0x6d, 0xd0, 0xbf, 0xd4, 0xe8, 0x06, 0x19, 0xfa, 0xb6, 0x59, 0x2e,
+ 0x07, 0xa3, 0xec, 0xdc, 0xca, 0x3a, 0xc7, 0xc6, 0x2d, 0xf8, 0x8d, 0x78, 0xa7, 0x64, 0xa5, 0x0f,
+ 0x0e, 0xea, 0xf5, 0x0c, 0x0f, 0x61, 0xe4, 0xd3, 0xa6, 0x43, 0xfb, 0x95, 0x1f, 0x1e, 0x59, 0x85,
+ 0xf9, 0xd7, 0xa3, 0x2e, 0xa3, 0x1b, 0x30, 0x15, 0x98, 0xde, 0x80, 0x1c, 0xf5, 0x0c, 0x59, 0xda,
+ 0xfe, 0x92, 0x23, 0xdc, 0xa5, 0x2d, 0x31, 0x47, 0x70, 0xf9, 0x4d, 0x98, 0x53, 0x7b, 0x9e, 0xe1,
+ 0x21, 0x68, 0xa9, 0x1e, 0x82, 0x13, 0x9f, 0xb7, 0xa9, 0x1e, 0x85, 0xdf, 0xad, 0x80, 0x78, 0xc9,
+ 0x65, 0x8c, 0x53, 0x84, 0x37, 0xa3, 0x9c, 0x73, 0x65, 0x9e, 0xaf, 0x49, 0xbf, 0x09, 0x19, 0x0f,
+ 0x82, 0x9a, 0x76, 0x0e, 0x79, 0x30, 0xed, 0x18, 0x9b, 0xc4, 0x89, 0x5e, 0xf5, 0xb8, 0x5e, 0xe2,
+ 0xd1, 0x09, 0x9e, 0xa7, 0x39, 0x48, 0xf9, 0x9a, 0x79, 0x21, 0x16, 0x64, 0x2e, 0x3f, 0x0f, 0x35,
+ 0xa5, 0xda, 0x89, 0xdc, 0x32, 0x7f, 0xa8, 0xc1, 0x5c, 0x22, 0xf1, 0x54, 0x1f, 0x26, 0x7c, 0x69,
+ 0x8e, 0x17, 0x3d, 0x66, 0x89, 0xa2, 0x81, 0x1e, 0x3b, 0xa2, 0x12, 0xa6, 0x74, 0x64, 0x8e, 0xaa,
+ 0xca, 0x69, 0xe5, 0xa8, 0xfa, 0xba, 0x06, 0x97, 0xa2, 0x0f, 0x4a, 0x26, 0x36, 0x40, 0x4f, 0x43,
+ 0xd5, 0x18, 0xd8, 0xcc, 0xf9, 0xaa, 0xfa, 0xaf, 0x57, 0x3b, 0x6d, 0x56, 0x86, 0x25, 0x14, 0x7d,
+ 0x04, 0xaa, 0x11, 0xeb, 0x09, 0xe5, 0x4b, 0xca, 0x2d, 0x79, 0x70, 0x24, 0x6b, 0xa0, 0xa7, 0x94,
+ 0xbc, 0x80, 0x53, 0xf1, 0x46, 0x2d, 0x09, 0xf3, 0xf3, 0x60, 0xfd, 0x6b, 0x15, 0x98, 0xe7, 0xce,
+ 0x8a, 0x86, 0xed, 0x5a, 0xb6, 0xdb, 0x3b, 0x83, 0x0d, 0x22, 0xf1, 0xc8, 0x60, 0xe5, 0xb4, 0x1e,
+ 0x19, 0xbc, 0x09, 0xd3, 0x6f, 0x51, 0x49, 0x15, 0x31, 0xf8, 0x58, 0x02, 0x43, 0x32, 0x2f, 0x13,
+ 0x72, 0x01, 0x16, 0x28, 0xf4, 0x3f, 0xd1, 0x60, 0x29, 0x31, 0x2c, 0x67, 0xb0, 0xd3, 0xf4, 0x92,
+ 0x3b, 0x4d, 0xa3, 0x60, 0x82, 0x0f, 0xa5, 0xd3, 0x39, 0x3b, 0xce, 0x6f, 0x56, 0x80, 0x3d, 0x69,
+ 0x76, 0x06, 0x53, 0xfd, 0x46, 0x42, 0x17, 0x78, 0xa9, 0xf8, 0xdb, 0x33, 0x79, 0x0e, 0x8d, 0x5e,
+ 0xca, 0xa1, 0xf1, 0xe9, 0xe2, 0x24, 0x8e, 0xf6, 0x66, 0xfc, 0x4a, 0x05, 0x80, 0x56, 0xe3, 0x8f,
+ 0xbf, 0x89, 0xb8, 0xc4, 0xf8, 0x31, 0xd0, 0xd9, 0x77, 0xcb, 0x0b, 0x9e, 0xba, 0x7c, 0x5f, 0x6c,
+ 0x22, 0x76, 0x9b, 0x27, 0xdf, 0x16, 0x4b, 0x2e, 0xc0, 0xc9, 0x53, 0x5a, 0x80, 0xfa, 0x6f, 0x6a,
+ 0xc0, 0x52, 0x8e, 0xb7, 0x6e, 0x77, 0xd1, 0x27, 0x60, 0xde, 0xe6, 0xe7, 0x92, 0x2d, 0x35, 0xa3,
+ 0x16, 0x3b, 0x3b, 0x69, 0xab, 0x00, 0x9c, 0xac, 0x87, 0x5c, 0x65, 0x5c, 0xcb, 0xbc, 0xbc, 0x28,
+ 0x3a, 0x22, 0x73, 0xea, 0xce, 0x65, 0xcf, 0x8c, 0xfe, 0xa3, 0x0a, 0x9c, 0x4b, 0xd5, 0x1d, 0xc3,
+ 0x54, 0x79, 0x38, 0x12, 0x4c, 0x49, 0xb3, 0x3b, 0x71, 0x06, 0x69, 0x76, 0x65, 0xc6, 0xdb, 0xc9,
+ 0x87, 0x9c, 0xf1, 0xf6, 0xbb, 0x1a, 0x54, 0xe9, 0x18, 0x9f, 0x81, 0x0c, 0xfd, 0xc9, 0xa4, 0x0c,
+ 0x7d, 0xbe, 0x30, 0xef, 0xe4, 0x88, 0xce, 0x3f, 0xd3, 0x80, 0xbd, 0x48, 0x21, 0x82, 0x31, 0x94,
+ 0xf8, 0x06, 0x2d, 0x27, 0xbe, 0xe1, 0x49, 0x11, 0x1e, 0x91, 0x72, 0xed, 0x29, 0x21, 0x12, 0x1f,
+ 0x51, 0x22, 0x20, 0x26, 0x92, 0xa2, 0x64, 0x34, 0x0a, 0x02, 0xbd, 0x0d, 0xf3, 0xc1, 0xb6, 0xe7,
+ 0x85, 0x91, 0x85, 0x2f, 0x66, 0xaf, 0x51, 0x3c, 0x94, 0x38, 0xfa, 0x16, 0xbe, 0x3a, 0xbb, 0x2a,
+ 0x72, 0x9c, 0xa4, 0xa5, 0xff, 0x9e, 0xf8, 0xfc, 0x13, 0x2c, 0x95, 0x33, 0x14, 0x7d, 0x1f, 0x4a,
+ 0x89, 0xbe, 0xbc, 0xa7, 0x15, 0x7f, 0x4b, 0x7c, 0x85, 0x7c, 0xb0, 0xcc, 0x81, 0x79, 0x47, 0x7d,
+ 0x79, 0x44, 0x30, 0x66, 0xa1, 0x47, 0x4b, 0xc4, 0x7b, 0x9d, 0x4a, 0x11, 0x4e, 0x22, 0xa7, 0xb2,
+ 0x31, 0xea, 0xb8, 0xfa, 0x0e, 0x3b, 0x6b, 0xd8, 0x51, 0x01, 0x38, 0x59, 0x4f, 0x7f, 0x15, 0x3e,
+ 0xc8, 0xbb, 0xcd, 0xe2, 0xb7, 0xd7, 0xf6, 0x4c, 0x12, 0x04, 0x4d, 0x63, 0x60, 0x98, 0xd4, 0x06,
+ 0x61, 0x37, 0x20, 0xb9, 0x8b, 0xee, 0x99, 0x74, 0x06, 0x54, 0xe9, 0x17, 0x48, 0x67, 0x41, 0xd5,
+ 0xbf, 0x5c, 0x81, 0xba, 0x82, 0x33, 0x11, 0xc9, 0x12, 0x71, 0xdc, 0x37, 0x35, 0xa8, 0x19, 0xae,
+ 0xeb, 0x85, 0x86, 0x7a, 0x70, 0x44, 0x4a, 0xbc, 0x23, 0x97, 0x4b, 0x6b, 0x65, 0x35, 0xa6, 0x93,
+ 0x3a, 0x27, 0x56, 0x20, 0x58, 0xed, 0xce, 0xe5, 0x97, 0x61, 0x31, 0xdd, 0xea, 0x44, 0xc6, 0x46,
+ 0x03, 0x2e, 0x2a, 0xbd, 0x12, 0x17, 0xc9, 0xa8, 0x22, 0xfc, 0x0c, 0xcc, 0xec, 0xda, 0x81, 0x1d,
+ 0x5d, 0x4a, 0x56, 0x86, 0xf1, 0x35, 0x5e, 0x8c, 0x23, 0xb8, 0xfe, 0x0a, 0x9c, 0x57, 0x71, 0xb0,
+ 0x35, 0x73, 0xbb, 0x7b, 0x92, 0x89, 0x58, 0x87, 0x27, 0x15, 0x0c, 0x99, 0x57, 0xa9, 0x4e, 0x82,
+ 0xee, 0x67, 0xa7, 0x23, 0x16, 0x17, 0xb1, 0xfe, 0xdf, 0xd1, 0xe0, 0x51, 0x92, 0xc7, 0x31, 0x82,
+ 0xdf, 0x3f, 0x57, 0x76, 0x4a, 0x73, 0x59, 0x52, 0xa4, 0xae, 0xc9, 0x03, 0xe3, 0xfc, 0xae, 0xa1,
+ 0xfb, 0x00, 0x81, 0x9c, 0x93, 0x52, 0x21, 0xa0, 0x99, 0xb3, 0x2c, 0xf2, 0x95, 0xca, 0xdf, 0x58,
+ 0xa1, 0x86, 0x7c, 0xa8, 0x06, 0x62, 0x2e, 0x4b, 0xdd, 0xf8, 0xcc, 0xe0, 0x0d, 0x11, 0xe5, 0x26,
+ 0x7e, 0x61, 0x49, 0x07, 0xfd, 0x9a, 0x06, 0x17, 0x9c, 0x8c, 0xa5, 0x21, 0xe4, 0xfc, 0xdd, 0x87,
+ 0xb1, 0xec, 0xb8, 0xdb, 0x32, 0x0b, 0x82, 0x33, 0xfb, 0x82, 0xfe, 0x41, 0xee, 0xdd, 0x42, 0x1e,
+ 0x7d, 0xbd, 0x51, 0xb6, 0x97, 0xa7, 0x75, 0xcd, 0xf0, 0xdf, 0xcc, 0x70, 0xd5, 0x83, 0xf9, 0xd6,
+ 0x4c, 0x98, 0xde, 0x64, 0xda, 0xbb, 0xe0, 0xf7, 0xe2, 0xb6, 0x82, 0x78, 0xae, 0x9e, 0xe9, 0xd6,
+ 0xfc, 0x7f, 0x2c, 0x50, 0xa3, 0x15, 0x80, 0x4d, 0xc7, 0x33, 0x77, 0x9a, 0xed, 0x16, 0x8e, 0x44,
+ 0x3b, 0xe3, 0xb1, 0x86, 0x2c, 0xc5, 0x4a, 0x0d, 0xf4, 0x05, 0x98, 0xb0, 0xa4, 0xc2, 0xf7, 0x62,
+ 0x19, 0x5d, 0x37, 0x0e, 0xad, 0xa5, 0x1c, 0x45, 0xb1, 0x22, 0x0f, 0xaa, 0xae, 0xd8, 0xdd, 0x05,
+ 0xff, 0x14, 0x7f, 0xfe, 0x53, 0xaa, 0x09, 0x52, 0x3b, 0x89, 0x4a, 0xb0, 0x24, 0x42, 0x09, 0x4a,
+ 0xf5, 0x7d, 0xaa, 0x24, 0x41, 0xa9, 0xbb, 0x1f, 0x65, 0x59, 0x75, 0x54, 0x4d, 0x7c, 0x7a, 0x7c,
+ 0x4d, 0x7c, 0x3e, 0x57, 0x0b, 0xdf, 0x82, 0xe9, 0xd0, 0x60, 0x21, 0x1a, 0x33, 0x25, 0x22, 0x24,
+ 0xe8, 0x07, 0xdc, 0xa5, 0x68, 0x62, 0x2d, 0x84, 0xfd, 0x0c, 0xb0, 0xc0, 0x4e, 0xb9, 0x91, 0x3f,
+ 0x4d, 0x2a, 0x52, 0x47, 0x15, 0xe7, 0x46, 0xfe, 0xd6, 0x03, 0xe7, 0x46, 0xfe, 0x3f, 0x16, 0xa8,
+ 0xd1, 0x0e, 0xd5, 0x2d, 0xf9, 0x16, 0x20, 0xae, 0xfd, 0xae, 0x96, 0x5d, 0x9a, 0x41, 0x14, 0xa0,
+ 0xcb, 0x7f, 0x61, 0x49, 0x00, 0x99, 0x30, 0x23, 0x8c, 0x39, 0x91, 0x90, 0xf3, 0xc5, 0x32, 0x49,
+ 0x91, 0xa3, 0x27, 0x4c, 0xf8, 0x1d, 0xbf, 0x08, 0xb3, 0xfe, 0xdb, 0x13, 0xdc, 0x0e, 0x7f, 0x07,
+ 0x63, 0x5a, 0x7a, 0x50, 0x8d, 0x90, 0x95, 0x72, 0x6c, 0x44, 0x39, 0xf6, 0xf9, 0x80, 0xca, 0x8c,
+ 0xfb, 0x12, 0x39, 0x6a, 0x66, 0x5d, 0x07, 0x50, 0x92, 0x0d, 0x8d, 0x75, 0x15, 0x20, 0xfb, 0xf0,
+ 0x77, 0xb2, 0xd0, 0xe1, 0xef, 0x4b, 0x70, 0x2e, 0x8a, 0x8f, 0xb1, 0x08, 0xf3, 0x7d, 0x8b, 0x08,
+ 0x52, 0x76, 0x67, 0xad, 0x99, 0x04, 0xe1, 0x74, 0x5d, 0xfd, 0x26, 0xcc, 0xca, 0x75, 0x81, 0x9e,
+ 0x50, 0x74, 0xb4, 0x58, 0x74, 0xdd, 0x24, 0xfb, 0x5c, 0x61, 0xab, 0x27, 0x14, 0x36, 0x6e, 0x58,
+ 0xbd, 0x46, 0x0b, 0x84, 0xee, 0xa6, 0xff, 0x37, 0x8d, 0x33, 0x82, 0x78, 0x47, 0xc5, 0x80, 0x5a,
+ 0x9f, 0x27, 0x2f, 0x62, 0x89, 0x33, 0x8b, 0xdd, 0x90, 0x62, 0x07, 0xeb, 0xeb, 0x31, 0x1a, 0xac,
+ 0xe2, 0x44, 0x7b, 0xa3, 0x8f, 0xfd, 0x5c, 0x2f, 0xb9, 0x68, 0xc7, 0x7e, 0xf3, 0x07, 0x8d, 0xb6,
+ 0x51, 0x1f, 0x56, 0xd1, 0x8e, 0x7e, 0x58, 0xe5, 0xf8, 0xf7, 0x21, 0xf4, 0x7f, 0xaa, 0x41, 0x66,
+ 0x0e, 0x60, 0xa4, 0xc3, 0x34, 0x0f, 0xee, 0x55, 0x9f, 0x41, 0xe2, 0x91, 0xbf, 0x58, 0x40, 0x90,
+ 0x0f, 0x17, 0x44, 0xe4, 0xec, 0x4d, 0xb2, 0x1f, 0x3f, 0xbf, 0x23, 0x56, 0xc7, 0xf8, 0xf1, 0x73,
+ 0x2c, 0xab, 0x49, 0x37, 0x85, 0x09, 0x67, 0xe2, 0x66, 0xb1, 0x9b, 0x4c, 0x7f, 0x7a, 0x8f, 0x1c,
+ 0x69, 0xb2, 0xbe, 0x3e, 0xe4, 0xd8, 0x4d, 0x4e, 0xe3, 0xf8, 0xd8, 0x4d, 0x56, 0xef, 0xbd, 0x72,
+ 0x78, 0xca, 0x3a, 0x9b, 0xe3, 0x8f, 0xf9, 0x7d, 0x0d, 0x96, 0x46, 0xee, 0x43, 0x8f, 0x71, 0xf2,
+ 0x76, 0x86, 0x5e, 0x89, 0xa7, 0xd2, 0x0f, 0xaf, 0xd4, 0x32, 0x13, 0x25, 0xbc, 0x0e, 0xf3, 0x09,
+ 0x6f, 0x8c, 0xbc, 0x57, 0xa3, 0x65, 0xde, 0xab, 0x51, 0xaf, 0xcd, 0x54, 0x8e, 0xbc, 0x36, 0xf3,
+ 0xbf, 0xe7, 0xc4, 0x7c, 0x33, 0x1d, 0xf8, 0x0d, 0x98, 0x66, 0x77, 0x5b, 0xa2, 0x97, 0xbc, 0x3e,
+ 0x55, 0xfc, 0xd2, 0x4c, 0xc0, 0x45, 0x01, 0xff, 0x1f, 0x0b, 0xb4, 0xa8, 0x05, 0x8b, 0xec, 0xb1,
+ 0xe7, 0x8e, 0xef, 0x6d, 0xd9, 0x0e, 0xb9, 0x1d, 0x4b, 0x1d, 0x79, 0xa1, 0xb6, 0x99, 0x82, 0xe3,
+ 0x91, 0x16, 0xa8, 0xab, 0x6a, 0xc5, 0x9f, 0x2c, 0xea, 0x99, 0xe4, 0x09, 0x8c, 0xa4, 0x36, 0xec,
+ 0x03, 0x90, 0x68, 0xea, 0xa2, 0x78, 0x8b, 0x97, 0x0b, 0x5e, 0x16, 0x96, 0x1c, 0x10, 0xc9, 0x0d,
+ 0x59, 0x14, 0x60, 0x85, 0x0a, 0x0a, 0xa0, 0xb6, 0x1d, 0xbf, 0xf5, 0x22, 0x74, 0xe2, 0x57, 0xca,
+ 0x3e, 0x34, 0xc3, 0x37, 0x2a, 0xa5, 0x00, 0xab, 0x54, 0x50, 0x90, 0x78, 0x67, 0xbb, 0x4c, 0x1a,
+ 0xfd, 0x58, 0xc3, 0x38, 0xee, 0x8d, 0x6d, 0x4a, 0xd4, 0x95, 0x57, 0xd4, 0x44, 0xc4, 0x5b, 0x31,
+ 0xa2, 0xf1, 0x4d, 0xb7, 0x98, 0x68, 0x5c, 0x86, 0x15, 0x32, 0x74, 0x78, 0xfb, 0xf1, 0xc5, 0x44,
+ 0xa1, 0x49, 0xbf, 0x52, 0xf6, 0x86, 0xa8, 0xd0, 0x03, 0xe2, 0x02, 0xac, 0x52, 0x41, 0x1e, 0x40,
+ 0x5f, 0x5e, 0x58, 0x14, 0x6a, 0x75, 0xb1, 0x2f, 0x8d, 0xef, 0x3d, 0x72, 0x1b, 0x31, 0xfe, 0x8d,
+ 0x15, 0x12, 0x54, 0x8b, 0x97, 0x56, 0x15, 0x94, 0x50, 0x38, 0xc7, 0xb2, 0xa8, 0x3e, 0x1e, 0x6b,
+ 0x15, 0x35, 0xb6, 0x6e, 0x1f, 0x53, 0x34, 0x8a, 0x07, 0x07, 0xf5, 0x39, 0x26, 0x4c, 0x46, 0x34,
+ 0x8c, 0xd8, 0xf9, 0x3a, 0x77, 0x94, 0xf3, 0x15, 0x5d, 0x87, 0xa5, 0x40, 0x3d, 0xa3, 0x64, 0x02,
+ 0x62, 0x9e, 0x35, 0x91, 0x57, 0x5c, 0xbb, 0xe9, 0x0a, 0x78, 0xb4, 0x0d, 0x97, 0x80, 0xc4, 0x62,
+ 0xed, 0x17, 0x54, 0x09, 0xc8, 0xcb, 0xb0, 0x84, 0xa2, 0x7b, 0xa9, 0x27, 0xa9, 0xcf, 0x95, 0x36,
+ 0x84, 0xc6, 0x7b, 0xa0, 0x1a, 0xfd, 0x94, 0xfa, 0x74, 0xd8, 0x62, 0x89, 0x10, 0x9c, 0x22, 0x6f,
+ 0x87, 0xa1, 0xdd, 0x64, 0x0c, 0xea, 0xd2, 0xe9, 0x84, 0xfc, 0x49, 0x87, 0x6c, 0x6e, 0xfc, 0xe9,
+ 0xbf, 0xa3, 0x8a, 0x79, 0xa4, 0x88, 0x90, 0x33, 0xd0, 0xce, 0x48, 0x42, 0x3b, 0x6b, 0x96, 0xd3,
+ 0x9c, 0x48, 0x6e, 0xd4, 0xd9, 0x1f, 0x69, 0xb0, 0x10, 0x57, 0x3b, 0x03, 0xed, 0xc9, 0x4a, 0x6a,
+ 0x4f, 0x9f, 0x2e, 0xf9, 0x61, 0x39, 0x2a, 0xd4, 0xff, 0xaf, 0xa8, 0x9f, 0xc5, 0x94, 0x84, 0x7b,
+ 0x8a, 0x81, 0xcb, 0x4d, 0xea, 0x76, 0x29, 0x03, 0x57, 0x8d, 0xe5, 0x89, 0xbf, 0x38, 0xc3, 0xe0,
+ 0xfd, 0x52, 0x62, 0x87, 0x2e, 0x13, 0xb4, 0x96, 0xfd, 0x06, 0xe1, 0xb1, 0xdb, 0xb5, 0xaf, 0xae,
+ 0x58, 0x1e, 0x47, 0xb2, 0x5a, 0x30, 0x50, 0x4a, 0xf9, 0xe4, 0xa3, 0xdf, 0xf8, 0xfb, 0xda, 0x2c,
+ 0xd4, 0x14, 0xc5, 0xfd, 0x1d, 0x71, 0x69, 0x0c, 0xa1, 0x66, 0x7a, 0x6e, 0x10, 0xfa, 0x86, 0x72,
+ 0x39, 0xa9, 0x2c, 0x51, 0x29, 0x2a, 0x9a, 0x31, 0x6a, 0xac, 0xd2, 0xa1, 0x1b, 0x9b, 0x64, 0xb4,
+ 0x89, 0xd3, 0xf0, 0xa4, 0x1c, 0xc5, 0x5c, 0x1f, 0x03, 0x88, 0x94, 0x24, 0xf9, 0x04, 0xaa, 0xcc,
+ 0x00, 0xd7, 0x0e, 0x6e, 0x48, 0x18, 0x56, 0xea, 0xa1, 0xb7, 0x61, 0xde, 0x51, 0x73, 0x3f, 0x09,
+ 0x15, 0xae, 0xd8, 0x79, 0x6b, 0x22, 0x8b, 0x54, 0x74, 0x54, 0xa8, 0x14, 0xe1, 0x24, 0x2d, 0xb4,
+ 0x03, 0xb3, 0x4e, 0x94, 0xb0, 0x4c, 0xe8, 0x71, 0x2f, 0x17, 0x26, 0xcc, 0xb0, 0x70, 0xc7, 0xa7,
+ 0xfc, 0x89, 0x63, 0xfc, 0x94, 0xef, 0xe4, 0x8f, 0x72, 0xce, 0xcf, 0x98, 0x9a, 0xe4, 0x3b, 0x59,
+ 0x14, 0x60, 0x85, 0x4a, 0x8e, 0x73, 0xaa, 0x5a, 0xc8, 0x39, 0x35, 0x84, 0xf3, 0x3e, 0x09, 0xfd,
+ 0xfd, 0xe6, 0xbe, 0xc9, 0x5e, 0xd6, 0xf3, 0x43, 0x96, 0x9b, 0x6c, 0xb6, 0xd8, 0x25, 0x03, 0x3c,
+ 0x8a, 0x0a, 0x67, 0xe1, 0xa7, 0x76, 0x1a, 0xdd, 0xf4, 0xc5, 0x1d, 0x5f, 0x66, 0xa7, 0xb1, 0x87,
+ 0xf7, 0x59, 0x29, 0xfa, 0x38, 0xd4, 0x42, 0x62, 0x6e, 0xbb, 0xb6, 0x69, 0x38, 0xed, 0x96, 0xd0,
+ 0xa8, 0xe2, 0x3d, 0x34, 0x06, 0x61, 0xb5, 0x1e, 0x6a, 0xc0, 0xc4, 0xd0, 0xb6, 0x84, 0x2a, 0xf5,
+ 0xe3, 0xf2, 0x49, 0xdd, 0x76, 0xeb, 0xc1, 0x41, 0xfd, 0xfd, 0x71, 0x08, 0xad, 0xfc, 0x92, 0xab,
+ 0x83, 0x9d, 0xde, 0xd5, 0x70, 0x7f, 0x40, 0x82, 0x95, 0x8d, 0x76, 0x0b, 0xd3, 0xc6, 0x59, 0xce,
+ 0xba, 0xf9, 0x13, 0x38, 0xeb, 0x6e, 0x01, 0xc4, 0x5b, 0x7c, 0x69, 0x6f, 0xdd, 0x7f, 0x9e, 0x82,
+ 0x8b, 0x65, 0x4f, 0x35, 0x59, 0xde, 0x2e, 0xf6, 0xb4, 0xc0, 0xea, 0x56, 0x48, 0xfc, 0x3b, 0x77,
+ 0xd6, 0xef, 0x6e, 0xfb, 0x24, 0xd8, 0xf6, 0x1c, 0xab, 0x60, 0xe2, 0xb0, 0xf8, 0x71, 0x8a, 0x11,
+ 0x8c, 0x38, 0x87, 0x12, 0x6a, 0xc2, 0x52, 0xf4, 0xbc, 0x01, 0xa6, 0x5b, 0xca, 0xd0, 0x0f, 0x42,
+ 0x11, 0x65, 0xc9, 0x9c, 0xb2, 0x6b, 0x69, 0x20, 0x1e, 0xad, 0x9f, 0x46, 0xc2, 0x9f, 0x39, 0x98,
+ 0x64, 0x2f, 0x22, 0x8e, 0x20, 0xe1, 0x6f, 0x1d, 0x8c, 0xd6, 0x57, 0x91, 0xc8, 0x17, 0x14, 0x99,
+ 0x78, 0x4a, 0x21, 0x89, 0x9f, 0x57, 0x1c, 0xad, 0x8f, 0x2c, 0x78, 0xdc, 0x27, 0xa6, 0xd7, 0xef,
+ 0x13, 0xd7, 0xe2, 0x39, 0x1f, 0x0d, 0xbf, 0x67, 0xbb, 0xd7, 0x7c, 0x83, 0x55, 0x14, 0xcf, 0x34,
+ 0x3e, 0x79, 0x78, 0x50, 0x7f, 0x1c, 0x1f, 0x51, 0x0f, 0x1f, 0x89, 0x05, 0xf5, 0xe1, 0xdc, 0x90,
+ 0xa5, 0xb8, 0xf1, 0xdb, 0x6e, 0x48, 0xfc, 0x5d, 0xc3, 0x29, 0xf8, 0x7e, 0x07, 0xe3, 0xdd, 0x8d,
+ 0x24, 0x2a, 0x9c, 0xc6, 0x8d, 0xf6, 0xa9, 0x28, 0x10, 0xdd, 0x51, 0x48, 0x56, 0x8b, 0x67, 0xb6,
+ 0xc3, 0xa3, 0xe8, 0x70, 0x16, 0x0d, 0xfd, 0xef, 0x68, 0x20, 0x0e, 0x61, 0xe4, 0x73, 0xc6, 0x5a,
+ 0xe6, 0x73, 0xc6, 0xa7, 0xf3, 0xac, 0xf7, 0x5f, 0x88, 0xdf, 0x4a, 0xcf, 0x7c, 0xd4, 0x5b, 0xbe,
+ 0x6a, 0x7f, 0x0f, 0x44, 0x63, 0x96, 0x47, 0x6c, 0xac, 0x24, 0x54, 0xc7, 0x3f, 0x2a, 0x1c, 0xa7,
+ 0xc1, 0x9a, 0xc8, 0x4d, 0x83, 0xf5, 0x1d, 0x0d, 0xd2, 0x8f, 0xb9, 0xa1, 0xa7, 0x60, 0x46, 0xdc,
+ 0x4b, 0x12, 0x37, 0x1b, 0x78, 0x84, 0x1a, 0x2f, 0xc2, 0x11, 0x2c, 0x69, 0x3f, 0x95, 0xd1, 0x06,
+ 0xb3, 0x63, 0xaf, 0x8f, 0xd1, 0xcb, 0x0e, 0x16, 0x60, 0x9a, 0x5f, 0xa4, 0x41, 0x3f, 0x9d, 0x19,
+ 0x02, 0x73, 0xab, 0xc4, 0x5d, 0x9d, 0x02, 0x91, 0x2e, 0x89, 0x54, 0x27, 0x95, 0x23, 0x53, 0x9d,
+ 0x74, 0x79, 0x3e, 0xba, 0x32, 0xee, 0xb3, 0x26, 0x6e, 0x8b, 0x84, 0xe8, 0x22, 0x17, 0x1d, 0x1a,
+ 0x26, 0xbc, 0x4a, 0x93, 0x25, 0x72, 0xed, 0xf1, 0x21, 0x50, 0x7c, 0x4b, 0x0b, 0x47, 0xf8, 0x95,
+ 0xe2, 0x8b, 0x0b, 0x53, 0x25, 0x8e, 0x5c, 0xc4, 0xa8, 0x8f, 0x71, 0x71, 0x41, 0xb2, 0xfd, 0x74,
+ 0x2e, 0xdb, 0xf7, 0x60, 0x46, 0x08, 0x0e, 0x21, 0xc5, 0x5e, 0x2c, 0x93, 0x6d, 0x4e, 0xb9, 0x73,
+ 0xca, 0x0b, 0x70, 0x84, 0x9d, 0xee, 0x8d, 0x7d, 0x63, 0xcf, 0xee, 0x0f, 0xfb, 0x4c, 0x76, 0x4d,
+ 0xa9, 0x55, 0x59, 0x31, 0x8e, 0xe0, 0xac, 0x2a, 0x3f, 0xab, 0x62, 0x1a, 0x8f, 0x5a, 0x55, 0xbc,
+ 0xe9, 0x11, 0xc1, 0xd1, 0xe7, 0xa1, 0xda, 0x37, 0xf6, 0xba, 0x43, 0xbf, 0x47, 0x84, 0x3b, 0x29,
+ 0xdf, 0x22, 0x1f, 0x86, 0xb6, 0xb3, 0x42, 0x75, 0xf5, 0xd0, 0x5f, 0x69, 0xbb, 0xe1, 0x1d, 0xbf,
+ 0x1b, 0xfa, 0x32, 0xab, 0xd6, 0xba, 0xc0, 0x82, 0x25, 0x3e, 0xe4, 0xc0, 0x42, 0xdf, 0xd8, 0xdb,
+ 0x70, 0x0d, 0xf9, 0xf4, 0x40, 0xad, 0x20, 0x05, 0xe6, 0x5b, 0x5f, 0x4f, 0xe0, 0xc2, 0x29, 0xdc,
+ 0x19, 0x6e, 0xfc, 0xb9, 0x87, 0xe5, 0xc6, 0x5f, 0x95, 0x61, 0x01, 0xf3, 0x8c, 0x0d, 0x1f, 0xcd,
+ 0x3a, 0xd4, 0x3a, 0xfa, 0xc4, 0xff, 0x0d, 0x79, 0xe2, 0xbf, 0x50, 0xc2, 0xf7, 0x7e, 0xc4, 0x69,
+ 0xff, 0x2e, 0xd4, 0x2c, 0x23, 0x34, 0x78, 0x69, 0xb0, 0x7c, 0xae, 0x84, 0xdb, 0xa0, 0x25, 0xf1,
+ 0x28, 0x09, 0x81, 0x63, 0xdc, 0x58, 0x25, 0x84, 0xee, 0xf0, 0xd4, 0xf4, 0x0e, 0x09, 0xe3, 0x2a,
+ 0xcc, 0x2f, 0xb7, 0xc8, 0xfd, 0x7a, 0x51, 0x22, 0xf9, 0x91, 0x0a, 0x38, 0xbb, 0x1d, 0x55, 0x27,
+ 0x79, 0x68, 0xf2, 0x52, 0x7c, 0xdb, 0x3d, 0x71, 0x9b, 0xe9, 0x6f, 0x69, 0xb0, 0xc8, 0x1f, 0x0d,
+ 0x6a, 0x7a, 0xfd, 0x81, 0xe7, 0x12, 0x3a, 0x31, 0x88, 0x8d, 0xea, 0xcd, 0x12, 0xf2, 0xa1, 0x9b,
+ 0x42, 0x29, 0x0e, 0x24, 0x53, 0xa5, 0x78, 0x84, 0x34, 0xfa, 0xe7, 0x1a, 0x2c, 0xf7, 0x73, 0xf2,
+ 0xed, 0x2e, 0x9f, 0x2f, 0x11, 0x13, 0x75, 0x5c, 0x12, 0x5f, 0xfe, 0xa0, 0xd9, 0x71, 0xb5, 0x70,
+ 0x6e, 0xe7, 0xca, 0x86, 0x4f, 0x96, 0xb9, 0xe6, 0xf5, 0x97, 0x61, 0x31, 0xbd, 0x13, 0xa8, 0xb9,
+ 0xf4, 0xb5, 0x87, 0x9b, 0x4b, 0x5f, 0x7f, 0x09, 0x2e, 0x65, 0xcf, 0x3a, 0xd5, 0x8e, 0xd8, 0xd3,
+ 0x14, 0xc2, 0x20, 0x89, 0x93, 0xc3, 0xd1, 0x42, 0xcc, 0x61, 0x8d, 0x95, 0xef, 0xfe, 0xe8, 0xca,
+ 0xfb, 0xbe, 0xff, 0xa3, 0x2b, 0xef, 0xfb, 0xc1, 0x8f, 0xae, 0xbc, 0xef, 0xcb, 0x87, 0x57, 0xb4,
+ 0xef, 0x1e, 0x5e, 0xd1, 0xbe, 0x7f, 0x78, 0x45, 0xfb, 0xc1, 0xe1, 0x15, 0xed, 0x3f, 0x1e, 0x5e,
+ 0xd1, 0xfe, 0xf6, 0x7f, 0xba, 0xf2, 0xbe, 0xcf, 0x57, 0xa3, 0x2e, 0xfd, 0x79, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xa6, 0x4b, 0xaa, 0xf1, 0x38, 0xaf, 0x00, 0x00,
+}
+
+func (m *Addon) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Addon) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Addon) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *Addons) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Addons) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Addons) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NginxIngress != nil {
+ {
+ size, err := m.NginxIngress.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.KubernetesDashboard != nil {
+ {
+ size, err := m.KubernetesDashboard.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AdmissionPlugin) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AdmissionPlugin) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AdmissionPlugin) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Config != nil {
+ {
+ size, err := m.Config.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Alerting) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Alerting) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Alerting) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.EmailReceivers) > 0 {
+ for iNdEx := len(m.EmailReceivers) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.EmailReceivers[iNdEx])
+ copy(dAtA[i:], m.EmailReceivers[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmailReceivers[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuditConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuditConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuditConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.AuditPolicy != nil {
+ {
+ size, err := m.AuditPolicy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuditPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuditPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuditPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ConfigMapRef != nil {
+ {
+ size, err := m.ConfigMapRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AvailabilityZone) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AvailabilityZone) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AvailabilityZone) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.UnavailableVolumeTypes) > 0 {
+ for iNdEx := len(m.UnavailableVolumeTypes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.UnavailableVolumeTypes[iNdEx])
+ copy(dAtA[i:], m.UnavailableVolumeTypes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnavailableVolumeTypes[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.UnavailableMachineTypes) > 0 {
+ for iNdEx := len(m.UnavailableMachineTypes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.UnavailableMachineTypes[iNdEx])
+ copy(dAtA[i:], m.UnavailableMachineTypes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnavailableMachineTypes[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupBucket) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupBucket) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupBucket) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupBucketList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupBucketList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupBucketList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupBucketProvider) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupBucketProvider) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupBucketProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Region)
+ copy(dAtA[i:], m.Region)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupBucketSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupBucketSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupBucketSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Seed != nil {
+ i -= len(*m.Seed)
+ copy(dAtA[i:], *m.Seed)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Seed)))
+ i--
+ dAtA[i] = 0x22
+ }
+ {
+ size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupBucketStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupBucketStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupBucketStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.GeneratedSecretRef != nil {
+ {
+ size, err := m.GeneratedSecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x20
+ if m.LastError != nil {
+ {
+ size, err := m.LastError.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.LastOperation != nil {
+ {
+ size, err := m.LastOperation.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ProviderStatus != nil {
+ {
+ size, err := m.ProviderStatus.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupEntry) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupEntry) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupEntryList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupEntryList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupEntryList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupEntrySpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupEntrySpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupEntrySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Seed != nil {
+ i -= len(*m.Seed)
+ copy(dAtA[i:], *m.Seed)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Seed)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.BucketName)
+ copy(dAtA[i:], m.BucketName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.BucketName)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupEntryStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupEntryStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupEntryStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x18
+ if m.LastError != nil {
+ {
+ size, err := m.LastError.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.LastOperation != nil {
+ {
+ size, err := m.LastOperation.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CRI) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CRI) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CRI) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ContainerRuntimes) > 0 {
+ for iNdEx := len(m.ContainerRuntimes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ContainerRuntimes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CloudInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CloudInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloudInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Region)
+ copy(dAtA[i:], m.Region)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CloudProfile) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CloudProfile) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloudProfile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CloudProfileList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CloudProfileList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloudProfileList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CloudProfileSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CloudProfileSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloudProfileSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.VolumeTypes) > 0 {
+ for iNdEx := len(m.VolumeTypes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.VolumeTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0x42
+ if m.SeedSelector != nil {
+ {
+ size, err := m.SeedSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.Regions) > 0 {
+ for iNdEx := len(m.Regions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Regions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.MachineTypes) > 0 {
+ for iNdEx := len(m.MachineTypes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.MachineTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.MachineImages) > 0 {
+ for iNdEx := len(m.MachineImages) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.MachineImages[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ {
+ size, err := m.Kubernetes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if m.CABundle != nil {
+ i -= len(*m.CABundle)
+ copy(dAtA[i:], *m.CABundle)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CABundle)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterAutoscaler) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterAutoscaler) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ScanInterval != nil {
+ {
+ size, err := m.ScanInterval.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.ScaleDownUtilizationThreshold != nil {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.ScaleDownUtilizationThreshold))))
+ i--
+ dAtA[i] = 0x29
+ }
+ if m.ScaleDownUnneededTime != nil {
+ {
+ size, err := m.ScaleDownUnneededTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ScaleDownDelayAfterFailure != nil {
+ {
+ size, err := m.ScaleDownDelayAfterFailure.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ScaleDownDelayAfterDelete != nil {
+ {
+ size, err := m.ScaleDownDelayAfterDelete.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ScaleDownDelayAfterAdd != nil {
+ {
+ size, err := m.ScaleDownDelayAfterAdd.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Kubernetes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Cloud.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Condition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Condition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Condition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Codes) > 0 {
+ for iNdEx := len(m.Codes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Codes[iNdEx])
+ copy(dAtA[i:], m.Codes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Codes[iNdEx])))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x2a
+ {
+ size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ContainerRuntime) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ContainerRuntime) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerRuntime) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerDeployment) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerDeployment) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.SeedSelector != nil {
+ {
+ size, err := m.SeedSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Policy != nil {
+ i -= len(*m.Policy)
+ copy(dAtA[i:], *m.Policy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Policy)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerInstallation) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerInstallation) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerInstallation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerInstallationList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerInstallationList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerInstallationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerInstallationSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerInstallationSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerInstallationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.SeedRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.RegistrationRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerInstallationStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerInstallationStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerInstallationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ProviderStatus != nil {
+ {
+ size, err := m.ProviderStatus.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerRegistration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerRegistration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerRegistration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerRegistrationList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerRegistrationList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerRegistrationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerRegistrationSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerRegistrationSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerRegistrationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Deployment != nil {
+ {
+ size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Resources) > 0 {
+ for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerResource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerResource) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerResource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Primary != nil {
+ i--
+ if *m.Primary {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.ReconcileTimeout != nil {
+ {
+ size, err := m.ReconcileTimeout.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.GloballyEnabled != nil {
+ i--
+ if *m.GloballyEnabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DNS) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DNS) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DNS) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Providers) > 0 {
+ for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Domain != nil {
+ i -= len(*m.Domain)
+ copy(dAtA[i:], *m.Domain)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Domain)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DNSIncludeExclude) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DNSIncludeExclude) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DNSIncludeExclude) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Exclude) > 0 {
+ for iNdEx := len(m.Exclude) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Exclude[iNdEx])
+ copy(dAtA[i:], m.Exclude[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Exclude[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Include) > 0 {
+ for iNdEx := len(m.Include) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Include[iNdEx])
+ copy(dAtA[i:], m.Include[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Include[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DNSProvider) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DNSProvider) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DNSProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Zones != nil {
+ {
+ size, err := m.Zones.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Type != nil {
+ i -= len(*m.Type)
+ copy(dAtA[i:], *m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.SecretName != nil {
+ i -= len(*m.SecretName)
+ copy(dAtA[i:], *m.SecretName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretName)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Primary != nil {
+ i--
+ if *m.Primary {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Domains != nil {
+ {
+ size, err := m.Domains.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DataVolume) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DataVolume) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Encrypted != nil {
+ i--
+ if *m.Encrypted {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ i -= len(m.VolumeSize)
+ copy(dAtA[i:], m.VolumeSize)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeSize)))
+ i--
+ dAtA[i] = 0x1a
+ if m.Type != nil {
+ i -= len(*m.Type)
+ copy(dAtA[i:], *m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Endpoint) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Purpose)
+ copy(dAtA[i:], m.Purpose)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Purpose)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.URL)
+ copy(dAtA[i:], m.URL)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ExpirableVersion) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ExpirableVersion) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExpirableVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Classification != nil {
+ i -= len(*m.Classification)
+ copy(dAtA[i:], *m.Classification)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Classification)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ExpirationDate != nil {
+ {
+ size, err := m.ExpirationDate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Extension) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Extension) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Extension) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Disabled != nil {
+ i--
+ if *m.Disabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ExtensionResourceState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ExtensionResourceState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExtensionResourceState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Resources) > 0 {
+ for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.State != nil {
+ {
+ size, err := m.State.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Purpose != nil {
+ i -= len(*m.Purpose)
+ copy(dAtA[i:], *m.Purpose)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Purpose)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Name != nil {
+ i -= len(*m.Name)
+ copy(dAtA[i:], *m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Gardener) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Gardener) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Gardener) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *GardenerResourceData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GardenerResourceData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GardenerResourceData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Hibernation) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Hibernation) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Hibernation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Schedules) > 0 {
+ for iNdEx := len(m.Schedules) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Schedules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Enabled != nil {
+ i--
+ if *m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HibernationSchedule) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HibernationSchedule) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HibernationSchedule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Location != nil {
+ i -= len(*m.Location)
+ copy(dAtA[i:], *m.Location)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Location)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.End != nil {
+ i -= len(*m.End)
+ copy(dAtA[i:], *m.End)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.End)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Start != nil {
+ i -= len(*m.Start)
+ copy(dAtA[i:], *m.Start)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Start)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HorizontalPodAutoscalerConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HorizontalPodAutoscalerConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HorizontalPodAutoscalerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.UpscaleDelay != nil {
+ {
+ size, err := m.UpscaleDelay.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.Tolerance != nil {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Tolerance))))
+ i--
+ dAtA[i] = 0x31
+ }
+ if m.SyncPeriod != nil {
+ {
+ size, err := m.SyncPeriod.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.InitialReadinessDelay != nil {
+ {
+ size, err := m.InitialReadinessDelay.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.DownscaleStabilization != nil {
+ {
+ size, err := m.DownscaleStabilization.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.DownscaleDelay != nil {
+ {
+ size, err := m.DownscaleDelay.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.CPUInitializationPeriod != nil {
+ {
+ size, err := m.CPUInitializationPeriod.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Ingress) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Ingress) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Controller.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Domain)
+ copy(dAtA[i:], m.Domain)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Domain)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressController) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressController) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressController) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeAPIServerConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeAPIServerConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeAPIServerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Requests != nil {
+ {
+ size, err := m.Requests.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ if m.WatchCacheSizes != nil {
+ {
+ size, err := m.WatchCacheSizes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.ServiceAccountConfig != nil {
+ {
+ size, err := m.ServiceAccountConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.RuntimeConfig) > 0 {
+ keysForRuntimeConfig := make([]string, 0, len(m.RuntimeConfig))
+ for k := range m.RuntimeConfig {
+ keysForRuntimeConfig = append(keysForRuntimeConfig, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForRuntimeConfig)
+ for iNdEx := len(keysForRuntimeConfig) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.RuntimeConfig[string(keysForRuntimeConfig[iNdEx])]
+ baseI := i
+ i--
+ if v {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i -= len(keysForRuntimeConfig[iNdEx])
+ copy(dAtA[i:], keysForRuntimeConfig[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRuntimeConfig[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if m.OIDCConfig != nil {
+ {
+ size, err := m.OIDCConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.EnableBasicAuthentication != nil {
+ i--
+ if *m.EnableBasicAuthentication {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.AuditConfig != nil {
+ {
+ size, err := m.AuditConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.APIAudiences) > 0 {
+ for iNdEx := len(m.APIAudiences) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.APIAudiences[iNdEx])
+ copy(dAtA[i:], m.APIAudiences[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIAudiences[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.AdmissionPlugins) > 0 {
+ for iNdEx := len(m.AdmissionPlugins) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.AdmissionPlugins[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeAPIServerRequests) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeAPIServerRequests) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeAPIServerRequests) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MaxMutatingInflight != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxMutatingInflight))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.MaxNonMutatingInflight != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxNonMutatingInflight))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeControllerManagerConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeControllerManagerConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeControllerManagerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.PodEvictionTimeout != nil {
+ {
+ size, err := m.PodEvictionTimeout.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.NodeCIDRMaskSize != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.NodeCIDRMaskSize))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.HorizontalPodAutoscalerConfig != nil {
+ {
+ size, err := m.HorizontalPodAutoscalerConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeProxyConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeProxyConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeProxyConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Mode != nil {
+ i -= len(*m.Mode)
+ copy(dAtA[i:], *m.Mode)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Mode)))
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeSchedulerConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeSchedulerConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeSchedulerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.KubeMaxPDVols != nil {
+ i -= len(*m.KubeMaxPDVols)
+ copy(dAtA[i:], *m.KubeMaxPDVols)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.KubeMaxPDVols)))
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeletConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeletConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeletConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.SystemReserved != nil {
+ {
+ size, err := m.SystemReserved.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x7a
+ }
+ if m.KubeReserved != nil {
+ {
+ size, err := m.KubeReserved.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x72
+ }
+ if m.FailSwapOn != nil {
+ i--
+ if *m.FailSwapOn {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x68
+ }
+ if m.ImagePullProgressDeadline != nil {
+ {
+ size, err := m.ImagePullProgressDeadline.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x62
+ }
+ if m.PodPIDsLimit != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.PodPIDsLimit))
+ i--
+ dAtA[i] = 0x58
+ }
+ if m.MaxPods != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxPods))
+ i--
+ dAtA[i] = 0x50
+ }
+ if m.EvictionSoftGracePeriod != nil {
+ {
+ size, err := m.EvictionSoftGracePeriod.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.EvictionSoft != nil {
+ {
+ size, err := m.EvictionSoft.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.EvictionPressureTransitionPeriod != nil {
+ {
+ size, err := m.EvictionPressureTransitionPeriod.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.EvictionMinimumReclaim != nil {
+ {
+ size, err := m.EvictionMinimumReclaim.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.EvictionMaxPodGracePeriod != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.EvictionMaxPodGracePeriod))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.EvictionHard != nil {
+ {
+ size, err := m.EvictionHard.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.CPUManagerPolicy != nil {
+ i -= len(*m.CPUManagerPolicy)
+ copy(dAtA[i:], *m.CPUManagerPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CPUManagerPolicy)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.CPUCFSQuota != nil {
+ i--
+ if *m.CPUCFSQuota {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ {
+ size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeletConfigEviction) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeletConfigEviction) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeletConfigEviction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NodeFSInodesFree != nil {
+ i -= len(*m.NodeFSInodesFree)
+ copy(dAtA[i:], *m.NodeFSInodesFree)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeFSInodesFree)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.NodeFSAvailable != nil {
+ i -= len(*m.NodeFSAvailable)
+ copy(dAtA[i:], *m.NodeFSAvailable)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeFSAvailable)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ImageFSInodesFree != nil {
+ i -= len(*m.ImageFSInodesFree)
+ copy(dAtA[i:], *m.ImageFSInodesFree)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ImageFSInodesFree)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ImageFSAvailable != nil {
+ i -= len(*m.ImageFSAvailable)
+ copy(dAtA[i:], *m.ImageFSAvailable)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ImageFSAvailable)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MemoryAvailable != nil {
+ i -= len(*m.MemoryAvailable)
+ copy(dAtA[i:], *m.MemoryAvailable)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MemoryAvailable)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeletConfigEvictionMinimumReclaim) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeletConfigEvictionMinimumReclaim) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeletConfigEvictionMinimumReclaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NodeFSInodesFree != nil {
+ {
+ size, err := m.NodeFSInodesFree.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.NodeFSAvailable != nil {
+ {
+ size, err := m.NodeFSAvailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ImageFSInodesFree != nil {
+ {
+ size, err := m.ImageFSInodesFree.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ImageFSAvailable != nil {
+ {
+ size, err := m.ImageFSAvailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MemoryAvailable != nil {
+ {
+ size, err := m.MemoryAvailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeletConfigEvictionSoftGracePeriod) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeletConfigEvictionSoftGracePeriod) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeletConfigEvictionSoftGracePeriod) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NodeFSInodesFree != nil {
+ {
+ size, err := m.NodeFSInodesFree.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.NodeFSAvailable != nil {
+ {
+ size, err := m.NodeFSAvailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ImageFSInodesFree != nil {
+ {
+ size, err := m.ImageFSInodesFree.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ImageFSAvailable != nil {
+ {
+ size, err := m.ImageFSAvailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MemoryAvailable != nil {
+ {
+ size, err := m.MemoryAvailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeletConfigReserved) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeletConfigReserved) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeletConfigReserved) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.PID != nil {
+ {
+ size, err := m.PID.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.EphemeralStorage != nil {
+ {
+ size, err := m.EphemeralStorage.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Memory != nil {
+ {
+ size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.CPU != nil {
+ {
+ size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Kubernetes) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Kubernetes) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Kubernetes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.VerticalPodAutoscaler != nil {
+ {
+ size, err := m.VerticalPodAutoscaler.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x42
+ if m.Kubelet != nil {
+ {
+ size, err := m.Kubelet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.KubeProxy != nil {
+ {
+ size, err := m.KubeProxy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.KubeScheduler != nil {
+ {
+ size, err := m.KubeScheduler.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.KubeControllerManager != nil {
+ {
+ size, err := m.KubeControllerManager.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.KubeAPIServer != nil {
+ {
+ size, err := m.KubeAPIServer.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ClusterAutoscaler != nil {
+ {
+ size, err := m.ClusterAutoscaler.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.AllowPrivilegedContainers != nil {
+ i--
+ if *m.AllowPrivilegedContainers {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubernetesConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubernetesConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubernetesConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.FeatureGates) > 0 {
+ keysForFeatureGates := make([]string, 0, len(m.FeatureGates))
+ for k := range m.FeatureGates {
+ keysForFeatureGates = append(keysForFeatureGates, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForFeatureGates)
+ for iNdEx := len(keysForFeatureGates) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.FeatureGates[string(keysForFeatureGates[iNdEx])]
+ baseI := i
+ i--
+ if v {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i -= len(keysForFeatureGates[iNdEx])
+ copy(dAtA[i:], keysForFeatureGates[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForFeatureGates[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubernetesDashboard) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubernetesDashboard) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubernetesDashboard) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Addon.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if m.AuthenticationMode != nil {
+ i -= len(*m.AuthenticationMode)
+ copy(dAtA[i:], *m.AuthenticationMode)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AuthenticationMode)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubernetesInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubernetesInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubernetesInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KubernetesSettings) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubernetesSettings) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubernetesSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Versions) > 0 {
+ for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LastError) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LastError) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LastError) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.LastUpdateTime != nil {
+ {
+ size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Codes) > 0 {
+ for iNdEx := len(m.Codes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Codes[iNdEx])
+ copy(dAtA[i:], m.Codes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Codes[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.TaskID != nil {
+ i -= len(*m.TaskID)
+ copy(dAtA[i:], *m.TaskID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TaskID)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *LastOperation) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LastOperation) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LastOperation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.State)
+ copy(dAtA[i:], m.State)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.State)))
+ i--
+ dAtA[i] = 0x22
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Progress))
+ i--
+ dAtA[i] = 0x18
+ {
+ size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Machine) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Machine) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Machine) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Image != nil {
+ {
+ size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MachineControllerManagerSettings) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MachineControllerManagerSettings) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MachineControllerManagerSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.NodeConditions) > 0 {
+ for iNdEx := len(m.NodeConditions) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.NodeConditions[iNdEx])
+ copy(dAtA[i:], m.NodeConditions[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeConditions[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.MaxEvictRetries != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxEvictRetries))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.MachineCreationTimeout != nil {
+ {
+ size, err := m.MachineCreationTimeout.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.MachineHealthTimeout != nil {
+ {
+ size, err := m.MachineHealthTimeout.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MachineDrainTimeout != nil {
+ {
+ size, err := m.MachineDrainTimeout.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MachineImage) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MachineImage) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MachineImage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Versions) > 0 {
+ for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MachineImageVersion) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MachineImageVersion) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MachineImageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.CRI) > 0 {
+ for iNdEx := len(m.CRI) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.CRI[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ExpirableVersion.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MachineType) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MachineType) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MachineType) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Usable != nil {
+ i--
+ if *m.Usable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.Storage != nil {
+ {
+ size, err := m.Storage.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.GPU.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MachineTypeStorage) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MachineTypeStorage) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MachineTypeStorage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.StorageSize.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Class)
+ copy(dAtA[i:], m.Class)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Class)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Maintenance) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Maintenance) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Maintenance) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ConfineSpecUpdateRollout != nil {
+ i--
+ if *m.ConfineSpecUpdateRollout {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.TimeWindow != nil {
+ {
+ size, err := m.TimeWindow.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.AutoUpdate != nil {
+ {
+ size, err := m.AutoUpdate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MaintenanceAutoUpdate) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MaintenanceAutoUpdate) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MaintenanceAutoUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.MachineImageVersion {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i--
+ if m.KubernetesVersion {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *MaintenanceTimeWindow) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MaintenanceTimeWindow) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MaintenanceTimeWindow) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.End)
+ copy(dAtA[i:], m.End)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.End)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Begin)
+ copy(dAtA[i:], m.Begin)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Begin)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Monitoring) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Monitoring) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Monitoring) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Alerting != nil {
+ {
+ size, err := m.Alerting.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NamedResourceReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NamedResourceReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedResourceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ResourceRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Networking) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Networking) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Networking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Services != nil {
+ i -= len(*m.Services)
+ copy(dAtA[i:], *m.Services)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Services)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Nodes != nil {
+ i -= len(*m.Nodes)
+ copy(dAtA[i:], *m.Nodes)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Nodes)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Pods != nil {
+ i -= len(*m.Pods)
+ copy(dAtA[i:], *m.Pods)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Pods)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *NginxIngress) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NginxIngress) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NginxIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Addon.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if m.ExternalTrafficPolicy != nil {
+ i -= len(*m.ExternalTrafficPolicy)
+ copy(dAtA[i:], *m.ExternalTrafficPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ExternalTrafficPolicy)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Config) > 0 {
+ keysForConfig := make([]string, 0, len(m.Config))
+ for k := range m.Config {
+ keysForConfig = append(keysForConfig, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForConfig)
+ for iNdEx := len(keysForConfig) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Config[string(keysForConfig[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForConfig[iNdEx])
+ copy(dAtA[i:], keysForConfig[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForConfig[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.LoadBalancerSourceRanges) > 0 {
+ for iNdEx := len(m.LoadBalancerSourceRanges) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.LoadBalancerSourceRanges[iNdEx])
+ copy(dAtA[i:], m.LoadBalancerSourceRanges[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerSourceRanges[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *OIDCConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OIDCConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OIDCConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.UsernamePrefix != nil {
+ i -= len(*m.UsernamePrefix)
+ copy(dAtA[i:], *m.UsernamePrefix)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UsernamePrefix)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if m.UsernameClaim != nil {
+ i -= len(*m.UsernameClaim)
+ copy(dAtA[i:], *m.UsernameClaim)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UsernameClaim)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.SigningAlgs) > 0 {
+ for iNdEx := len(m.SigningAlgs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.SigningAlgs[iNdEx])
+ copy(dAtA[i:], m.SigningAlgs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SigningAlgs[iNdEx])))
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if len(m.RequiredClaims) > 0 {
+ keysForRequiredClaims := make([]string, 0, len(m.RequiredClaims))
+ for k := range m.RequiredClaims {
+ keysForRequiredClaims = append(keysForRequiredClaims, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForRequiredClaims)
+ for iNdEx := len(keysForRequiredClaims) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.RequiredClaims[string(keysForRequiredClaims[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForRequiredClaims[iNdEx])
+ copy(dAtA[i:], keysForRequiredClaims[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequiredClaims[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if m.IssuerURL != nil {
+ i -= len(*m.IssuerURL)
+ copy(dAtA[i:], *m.IssuerURL)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IssuerURL)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.GroupsPrefix != nil {
+ i -= len(*m.GroupsPrefix)
+ copy(dAtA[i:], *m.GroupsPrefix)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GroupsPrefix)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.GroupsClaim != nil {
+ i -= len(*m.GroupsClaim)
+ copy(dAtA[i:], *m.GroupsClaim)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GroupsClaim)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ClientID != nil {
+ i -= len(*m.ClientID)
+ copy(dAtA[i:], *m.ClientID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClientID)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ClientAuthentication != nil {
+ {
+ size, err := m.ClientAuthentication.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.CABundle != nil {
+ i -= len(*m.CABundle)
+ copy(dAtA[i:], *m.CABundle)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CABundle)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *OpenIDConnectClientAuthentication) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OpenIDConnectClientAuthentication) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OpenIDConnectClientAuthentication) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Secret != nil {
+ i -= len(*m.Secret)
+ copy(dAtA[i:], *m.Secret)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Secret)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ExtraConfig) > 0 {
+ keysForExtraConfig := make([]string, 0, len(m.ExtraConfig))
+ for k := range m.ExtraConfig {
+ keysForExtraConfig = append(keysForExtraConfig, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForExtraConfig)
+ for iNdEx := len(keysForExtraConfig) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.ExtraConfig[string(keysForExtraConfig[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForExtraConfig[iNdEx])
+ copy(dAtA[i:], keysForExtraConfig[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtraConfig[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Plant) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Plant) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Plant) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *PlantList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PlantList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PlantList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *PlantSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PlantSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PlantSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Endpoints) > 0 {
+ for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *PlantStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PlantStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PlantStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ClusterInfo != nil {
+ {
+ size, err := m.ClusterInfo.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ObservedGeneration != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Project) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Project) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectMember) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectMember) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectMember) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Roles) > 0 {
+ for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Roles[iNdEx])
+ copy(dAtA[i:], m.Roles[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Roles[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.Role)
+ copy(dAtA[i:], m.Role)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Subject.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Tolerations != nil {
+ {
+ size, err := m.Tolerations.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.Namespace != nil {
+ i -= len(*m.Namespace)
+ copy(dAtA[i:], *m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Namespace)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.Members) > 0 {
+ for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.Purpose != nil {
+ i -= len(*m.Purpose)
+ copy(dAtA[i:], *m.Purpose)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Purpose)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Owner != nil {
+ {
+ size, err := m.Owner.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Description != nil {
+ i -= len(*m.Description)
+ copy(dAtA[i:], *m.Description)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Description)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.CreatedBy != nil {
+ {
+ size, err := m.CreatedBy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.StaleAutoDeleteTimestamp != nil {
+ {
+ size, err := m.StaleAutoDeleteTimestamp.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.StaleSinceTimestamp != nil {
+ {
+ size, err := m.StaleSinceTimestamp.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ i -= len(m.Phase)
+ copy(dAtA[i:], m.Phase)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
+ i--
+ dAtA[i] = 0x12
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectTolerations) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectTolerations) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectTolerations) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Whitelist) > 0 {
+ for iNdEx := len(m.Whitelist) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Whitelist[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Defaults) > 0 {
+ for iNdEx := len(m.Defaults) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Defaults[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Provider) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Provider) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Workers) > 0 {
+ for iNdEx := len(m.Workers) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Workers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.InfrastructureConfig != nil {
+ {
+ size, err := m.InfrastructureConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ControlPlaneConfig != nil {
+ {
+ size, err := m.ControlPlaneConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Quota) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Quota) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Quota) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *QuotaList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QuotaList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *QuotaSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QuotaSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Metrics) > 0 {
+ keysForMetrics := make([]string, 0, len(m.Metrics))
+ for k := range m.Metrics {
+ keysForMetrics = append(keysForMetrics, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForMetrics)
+ for iNdEx := len(keysForMetrics) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Metrics[k8s_io_api_core_v1.ResourceName(keysForMetrics[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForMetrics[iNdEx])
+ copy(dAtA[i:], keysForMetrics[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetrics[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.ClusterLifetimeDays != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.ClusterLifetimeDays))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Region) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Region) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Region) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+ keysForLabels := make([]string, 0, len(m.Labels))
+ for k := range m.Labels {
+ keysForLabels = append(keysForLabels, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Labels[string(keysForLabels[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForLabels[iNdEx])
+ copy(dAtA[i:], keysForLabels[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Zones) > 0 {
+ for iNdEx := len(m.Zones) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Zones[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.CrossVersionObjectReference.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceWatchCacheSize) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceWatchCacheSize) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceWatchCacheSize) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.CacheSize))
+ i--
+ dAtA[i] = 0x18
+ i -= len(m.Resource)
+ copy(dAtA[i:], m.Resource)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+ i--
+ dAtA[i] = 0x12
+ if m.APIGroup != nil {
+ i -= len(*m.APIGroup)
+ copy(dAtA[i:], *m.APIGroup)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretBinding) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretBinding) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Quotas) > 0 {
+ for iNdEx := len(m.Quotas) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Quotas[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ {
+ size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretBindingList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretBindingList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Seed) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Seed) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Seed) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedBackup) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedBackup) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedBackup) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if m.Region != nil {
+ i -= len(*m.Region)
+ copy(dAtA[i:], *m.Region)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Region)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Provider)
+ copy(dAtA[i:], m.Provider)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provider)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedDNS) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedDNS) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedDNS) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Provider != nil {
+ {
+ size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.IngressDomain != nil {
+ i -= len(*m.IngressDomain)
+ copy(dAtA[i:], *m.IngressDomain)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressDomain)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedDNSProvider) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedDNSProvider) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedDNSProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Zones != nil {
+ {
+ size, err := m.Zones.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Domains != nil {
+ {
+ size, err := m.Domains.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ {
+ size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedNetworks) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedNetworks) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedNetworks) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ShootDefaults != nil {
+ {
+ size, err := m.ShootDefaults.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ i -= len(m.Services)
+ copy(dAtA[i:], m.Services)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Services)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Pods)
+ copy(dAtA[i:], m.Pods)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pods)))
+ i--
+ dAtA[i] = 0x12
+ if m.Nodes != nil {
+ i -= len(*m.Nodes)
+ copy(dAtA[i:], *m.Nodes)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Nodes)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedProvider) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedProvider) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Region)
+ copy(dAtA[i:], m.Region)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region)))
+ i--
+ dAtA[i] = 0x1a
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSelector) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSelector) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ProviderTypes) > 0 {
+ for iNdEx := len(m.ProviderTypes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ProviderTypes[iNdEx])
+ copy(dAtA[i:], m.ProviderTypes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderTypes[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.LabelSelector != nil {
+ {
+ size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSettingExcessCapacityReservation) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSettingExcessCapacityReservation) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSettingExcessCapacityReservation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSettingLoadBalancerServices) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSettingLoadBalancerServices) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSettingLoadBalancerServices) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Annotations) > 0 {
+ keysForAnnotations := make([]string, 0, len(m.Annotations))
+ for k := range m.Annotations {
+ keysForAnnotations = append(keysForAnnotations, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Annotations[string(keysForAnnotations[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAnnotations[iNdEx])
+ copy(dAtA[i:], keysForAnnotations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSettingScheduling) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSettingScheduling) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSettingScheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Visible {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSettingShootDNS) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSettingShootDNS) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSettingShootDNS) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSettingVerticalPodAutoscaler) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSettingVerticalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSettingVerticalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSettings) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSettings) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.VerticalPodAutoscaler != nil {
+ {
+ size, err := m.VerticalPodAutoscaler.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.LoadBalancerServices != nil {
+ {
+ size, err := m.LoadBalancerServices.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ShootDNS != nil {
+ {
+ size, err := m.ShootDNS.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Scheduling != nil {
+ {
+ size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ExcessCapacityReservation != nil {
+ {
+ size, err := m.ExcessCapacityReservation.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Ingress != nil {
+ {
+ size, err := m.Ingress.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ if m.Settings != nil {
+ {
+ size, err := m.Settings.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.Volume != nil {
+ {
+ size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.Taints) > 0 {
+ for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if m.SecretRef != nil {
+ {
+ size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ {
+ size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ {
+ size, err := m.Networks.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.DNS.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.BlockCIDRs) > 0 {
+ for iNdEx := len(m.BlockCIDRs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.BlockCIDRs[iNdEx])
+ copy(dAtA[i:], m.BlockCIDRs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.BlockCIDRs[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Backup != nil {
+ {
+ size, err := m.Backup.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ClusterIdentity != nil {
+ i -= len(*m.ClusterIdentity)
+ copy(dAtA[i:], *m.ClusterIdentity)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClusterIdentity)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x20
+ if m.KubernetesVersion != nil {
+ i -= len(*m.KubernetesVersion)
+ copy(dAtA[i:], *m.KubernetesVersion)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.KubernetesVersion)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Gardener != nil {
+ {
+ size, err := m.Gardener.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedTaint) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedTaint) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedTaint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Value != nil {
+ i -= len(*m.Value)
+ copy(dAtA[i:], *m.Value)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedVolume) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedVolume) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Providers) > 0 {
+ for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.MinimumSize != nil {
+ {
+ size, err := m.MinimumSize.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedVolumeProvider) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedVolumeProvider) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedVolumeProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Purpose)
+ copy(dAtA[i:], m.Purpose)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Purpose)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ServiceAccountConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ServiceAccountConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceAccountConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.SigningKeySecret != nil {
+ {
+ size, err := m.SigningKeySecret.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Issuer != nil {
+ i -= len(*m.Issuer)
+ copy(dAtA[i:], *m.Issuer)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Issuer)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Shoot) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Shoot) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Shoot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootMachineImage) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootMachineImage) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootMachineImage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Version != nil {
+ i -= len(*m.Version)
+ copy(dAtA[i:], *m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootNetworks) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootNetworks) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootNetworks) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Services != nil {
+ i -= len(*m.Services)
+ copy(dAtA[i:], *m.Services)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Services)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Pods != nil {
+ i -= len(*m.Pods)
+ copy(dAtA[i:], *m.Pods)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Pods)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Tolerations) > 0 {
+ for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x8a
+ }
+ }
+ if len(m.Resources) > 0 {
+ for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x82
+ }
+ }
+ if m.SeedSelector != nil {
+ {
+ size, err := m.SeedSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x7a
+ }
+ if m.SeedName != nil {
+ i -= len(*m.SeedName)
+ copy(dAtA[i:], *m.SeedName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SeedName)))
+ i--
+ dAtA[i] = 0x72
+ }
+ i -= len(m.SecretBindingName)
+ copy(dAtA[i:], m.SecretBindingName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretBindingName)))
+ i--
+ dAtA[i] = 0x6a
+ i -= len(m.Region)
+ copy(dAtA[i:], m.Region)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region)))
+ i--
+ dAtA[i] = 0x62
+ if m.Purpose != nil {
+ i -= len(*m.Purpose)
+ copy(dAtA[i:], *m.Purpose)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Purpose)))
+ i--
+ dAtA[i] = 0x5a
+ }
+ {
+ size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ if m.Monitoring != nil {
+ {
+ size, err := m.Monitoring.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.Maintenance != nil {
+ {
+ size, err := m.Maintenance.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ {
+ size, err := m.Networking.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ {
+ size, err := m.Kubernetes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ if m.Hibernation != nil {
+ {
+ size, err := m.Hibernation.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.Extensions) > 0 {
+ for iNdEx := len(m.Extensions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Extensions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.DNS != nil {
+ {
+ size, err := m.DNS.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ i -= len(m.CloudProfileName)
+ copy(dAtA[i:], m.CloudProfileName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CloudProfileName)))
+ i--
+ dAtA[i] = 0x12
+ if m.Addons != nil {
+ {
+ size, err := m.Addons.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootStateList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootStateList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootStateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootStateSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootStateSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootStateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Resources) > 0 {
+ for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Extensions) > 0 {
+ for iNdEx := len(m.Extensions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Extensions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Gardener) > 0 {
+ for iNdEx := len(m.Gardener) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Gardener[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ClusterIdentity != nil {
+ i -= len(*m.ClusterIdentity)
+ copy(dAtA[i:], *m.ClusterIdentity)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClusterIdentity)))
+ i--
+ dAtA[i] = 0x6a
+ }
+ i -= len(m.UID)
+ copy(dAtA[i:], m.UID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+ i--
+ dAtA[i] = 0x62
+ i -= len(m.TechnicalID)
+ copy(dAtA[i:], m.TechnicalID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.TechnicalID)))
+ i--
+ dAtA[i] = 0x5a
+ if m.Seed != nil {
+ i -= len(*m.Seed)
+ copy(dAtA[i:], *m.Seed)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Seed)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if m.RetryCycleStartTime != nil {
+ {
+ size, err := m.RetryCycleStartTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x40
+ if len(m.LastErrors) > 0 {
+ for iNdEx := len(m.LastErrors) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.LastErrors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if m.LastError != nil {
+ {
+ size, err := m.LastError.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.LastOperation != nil {
+ {
+ size, err := m.LastOperation.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ i--
+ if m.IsHibernated {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ {
+ size, err := m.Gardener.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Constraints) > 0 {
+ for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Toleration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Toleration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Toleration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Value != nil {
+ i -= len(*m.Value)
+ copy(dAtA[i:], *m.Value)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *VerticalPodAutoscaler) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *VerticalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VerticalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RecommenderInterval != nil {
+ {
+ size, err := m.RecommenderInterval.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.UpdaterInterval != nil {
+ {
+ size, err := m.UpdaterInterval.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.RecommendationMarginFraction != nil {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.RecommendationMarginFraction))))
+ i--
+ dAtA[i] = 0x31
+ }
+ if m.EvictionTolerance != nil {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.EvictionTolerance))))
+ i--
+ dAtA[i] = 0x29
+ }
+ if m.EvictionRateLimit != nil {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.EvictionRateLimit))))
+ i--
+ dAtA[i] = 0x21
+ }
+ if m.EvictionRateBurst != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.EvictionRateBurst))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.EvictAfterOOMThreshold != nil {
+ {
+ size, err := m.EvictAfterOOMThreshold.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *Volume) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Volume) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Encrypted != nil {
+ i--
+ if *m.Encrypted {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ i -= len(m.VolumeSize)
+ copy(dAtA[i:], m.VolumeSize)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeSize)))
+ i--
+ dAtA[i] = 0x1a
+ if m.Type != nil {
+ i -= len(*m.Type)
+ copy(dAtA[i:], *m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Name != nil {
+ i -= len(*m.Name)
+ copy(dAtA[i:], *m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *VolumeType) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *VolumeType) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VolumeType) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Usable != nil {
+ i--
+ if *m.Usable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Class)
+ copy(dAtA[i:], m.Class)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Class)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *WatchCacheSizes) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchCacheSizes) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchCacheSizes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Resources) > 0 {
+ for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Default != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Default))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Worker) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Worker) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Worker) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MachineControllerManagerSettings != nil {
+ {
+ size, err := m.MachineControllerManagerSettings.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x9a
+ }
+ if m.SystemComponents != nil {
+ {
+ size, err := m.SystemComponents.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x92
+ }
+ if len(m.Zones) > 0 {
+ for iNdEx := len(m.Zones) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Zones[iNdEx])
+ copy(dAtA[i:], m.Zones[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Zones[iNdEx])))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x8a
+ }
+ }
+ if m.KubeletDataVolumeName != nil {
+ i -= len(*m.KubeletDataVolumeName)
+ copy(dAtA[i:], *m.KubeletDataVolumeName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.KubeletDataVolumeName)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x82
+ }
+ if len(m.DataVolumes) > 0 {
+ for iNdEx := len(m.DataVolumes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.DataVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x7a
+ }
+ }
+ if m.Volume != nil {
+ {
+ size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x72
+ }
+ if len(m.Taints) > 0 {
+ for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x6a
+ }
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x62
+ }
+ if m.MaxUnavailable != nil {
+ {
+ size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
+ if m.MaxSurge != nil {
+ {
+ size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Minimum))
+ i--
+ dAtA[i] = 0x48
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Maximum))
+ i--
+ dAtA[i] = 0x40
+ {
+ size, err := m.Machine.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x32
+ if len(m.Labels) > 0 {
+ keysForLabels := make([]string, 0, len(m.Labels))
+ for k := range m.Labels {
+ keysForLabels = append(keysForLabels, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Labels[string(keysForLabels[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForLabels[iNdEx])
+ copy(dAtA[i:], keysForLabels[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.Kubernetes != nil {
+ {
+ size, err := m.Kubernetes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.CRI != nil {
+ {
+ size, err := m.CRI.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.CABundle != nil {
+ i -= len(*m.CABundle)
+ copy(dAtA[i:], *m.CABundle)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CABundle)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Annotations) > 0 {
+ keysForAnnotations := make([]string, 0, len(m.Annotations))
+ for k := range m.Annotations {
+ keysForAnnotations = append(keysForAnnotations, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Annotations[string(keysForAnnotations[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAnnotations[iNdEx])
+ copy(dAtA[i:], keysForAnnotations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *WorkerKubernetes) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WorkerKubernetes) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WorkerKubernetes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Kubelet != nil {
+ {
+ size, err := m.Kubelet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *WorkerSystemComponents) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WorkerSystemComponents) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WorkerSystemComponents) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Allow {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Addon) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
+func (m *Addons) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.KubernetesDashboard != nil {
+ l = m.KubernetesDashboard.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NginxIngress != nil {
+ l = m.NginxIngress.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *AdmissionPlugin) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Config != nil {
+ l = m.Config.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Alerting) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.EmailReceivers) > 0 {
+ for _, s := range m.EmailReceivers {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *AuditConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AuditPolicy != nil {
+ l = m.AuditPolicy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *AuditPolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ConfigMapRef != nil {
+ l = m.ConfigMapRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *AvailabilityZone) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.UnavailableMachineTypes) > 0 {
+ for _, s := range m.UnavailableMachineTypes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.UnavailableVolumeTypes) > 0 {
+ for _, s := range m.UnavailableVolumeTypes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BackupBucket) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BackupBucketList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BackupBucketProvider) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Region)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BackupBucketSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Provider.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Seed != nil {
+ l = len(*m.Seed)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *BackupBucketStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ProviderStatus != nil {
+ l = m.ProviderStatus.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.LastOperation != nil {
+ l = m.LastOperation.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.LastError != nil {
+ l = m.LastError.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ if m.GeneratedSecretRef != nil {
+ l = m.GeneratedSecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *BackupEntry) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BackupEntryList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BackupEntrySpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.BucketName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Seed != nil {
+ l = len(*m.Seed)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *BackupEntryStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LastOperation != nil {
+ l = m.LastOperation.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.LastError != nil {
+ l = m.LastError.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ return n
+}
+
+func (m *CRI) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.ContainerRuntimes) > 0 {
+ for _, e := range m.ContainerRuntimes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CloudInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Region)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CloudProfile) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CloudProfileList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CloudProfileSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CABundle != nil {
+ l = len(*m.CABundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Kubernetes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.MachineImages) > 0 {
+ for _, e := range m.MachineImages {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.MachineTypes) > 0 {
+ for _, e := range m.MachineTypes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Regions) > 0 {
+ for _, e := range m.Regions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.SeedSelector != nil {
+ l = m.SeedSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.VolumeTypes) > 0 {
+ for _, e := range m.VolumeTypes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ClusterAutoscaler) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ScaleDownDelayAfterAdd != nil {
+ l = m.ScaleDownDelayAfterAdd.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ScaleDownDelayAfterDelete != nil {
+ l = m.ScaleDownDelayAfterDelete.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ScaleDownDelayAfterFailure != nil {
+ l = m.ScaleDownDelayAfterFailure.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ScaleDownUnneededTime != nil {
+ l = m.ScaleDownUnneededTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ScaleDownUtilizationThreshold != nil {
+ n += 9
+ }
+ if m.ScanInterval != nil {
+ l = m.ScanInterval.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ClusterInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Cloud.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Kubernetes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Condition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Codes) > 0 {
+ for _, s := range m.Codes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ContainerRuntime) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ControllerDeployment) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Policy != nil {
+ l = len(*m.Policy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SeedSelector != nil {
+ l = m.SeedSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ControllerInstallation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ControllerInstallationList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ControllerInstallationSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.RegistrationRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.SeedRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ControllerInstallationStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.ProviderStatus != nil {
+ l = m.ProviderStatus.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ControllerRegistration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ControllerRegistrationList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ControllerRegistrationSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Resources) > 0 {
+ for _, e := range m.Resources {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Deployment != nil {
+ l = m.Deployment.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ControllerResource) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.GloballyEnabled != nil {
+ n += 2
+ }
+ if m.ReconcileTimeout != nil {
+ l = m.ReconcileTimeout.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Primary != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *DNS) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Domain != nil {
+ l = len(*m.Domain)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Providers) > 0 {
+ for _, e := range m.Providers {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DNSIncludeExclude) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Include) > 0 {
+ for _, s := range m.Include {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Exclude) > 0 {
+ for _, s := range m.Exclude {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DNSProvider) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Domains != nil {
+ l = m.Domains.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Primary != nil {
+ n += 2
+ }
+ if m.SecretName != nil {
+ l = len(*m.SecretName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Type != nil {
+ l = len(*m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Zones != nil {
+ l = m.Zones.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *DataVolume) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Type != nil {
+ l = len(*m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.VolumeSize)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Encrypted != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *Endpoint) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.URL)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Purpose)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ExpirableVersion) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ExpirationDate != nil {
+ l = m.ExpirationDate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Classification != nil {
+ l = len(*m.Classification)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Extension) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Disabled != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *ExtensionResourceState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Name != nil {
+ l = len(*m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Purpose != nil {
+ l = len(*m.Purpose)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.State != nil {
+ l = m.State.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Resources) > 0 {
+ for _, e := range m.Resources {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Gardener) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GardenerResourceData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Data.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Hibernation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Enabled != nil {
+ n += 2
+ }
+ if len(m.Schedules) > 0 {
+ for _, e := range m.Schedules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *HibernationSchedule) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Start != nil {
+ l = len(*m.Start)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.End != nil {
+ l = len(*m.End)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Location != nil {
+ l = len(*m.Location)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *HorizontalPodAutoscalerConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CPUInitializationPeriod != nil {
+ l = m.CPUInitializationPeriod.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.DownscaleDelay != nil {
+ l = m.DownscaleDelay.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.DownscaleStabilization != nil {
+ l = m.DownscaleStabilization.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.InitialReadinessDelay != nil {
+ l = m.InitialReadinessDelay.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SyncPeriod != nil {
+ l = m.SyncPeriod.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Tolerance != nil {
+ n += 9
+ }
+ if m.UpscaleDelay != nil {
+ l = m.UpscaleDelay.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Ingress) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Domain)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Controller.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *IngressController) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeAPIServerConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.KubernetesConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.AdmissionPlugins) > 0 {
+ for _, e := range m.AdmissionPlugins {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.APIAudiences) > 0 {
+ for _, s := range m.APIAudiences {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.AuditConfig != nil {
+ l = m.AuditConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EnableBasicAuthentication != nil {
+ n += 2
+ }
+ if m.OIDCConfig != nil {
+ l = m.OIDCConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.RuntimeConfig) > 0 {
+ for k, v := range m.RuntimeConfig {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.ServiceAccountConfig != nil {
+ l = m.ServiceAccountConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.WatchCacheSizes != nil {
+ l = m.WatchCacheSizes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Requests != nil {
+ l = m.Requests.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeAPIServerRequests) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MaxNonMutatingInflight != nil {
+ n += 1 + sovGenerated(uint64(*m.MaxNonMutatingInflight))
+ }
+ if m.MaxMutatingInflight != nil {
+ n += 1 + sovGenerated(uint64(*m.MaxMutatingInflight))
+ }
+ return n
+}
+
+func (m *KubeControllerManagerConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.KubernetesConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.HorizontalPodAutoscalerConfig != nil {
+ l = m.HorizontalPodAutoscalerConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeCIDRMaskSize != nil {
+ n += 1 + sovGenerated(uint64(*m.NodeCIDRMaskSize))
+ }
+ if m.PodEvictionTimeout != nil {
+ l = m.PodEvictionTimeout.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeProxyConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.KubernetesConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Mode != nil {
+ l = len(*m.Mode)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeSchedulerConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.KubernetesConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.KubeMaxPDVols != nil {
+ l = len(*m.KubeMaxPDVols)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeletConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.KubernetesConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.CPUCFSQuota != nil {
+ n += 2
+ }
+ if m.CPUManagerPolicy != nil {
+ l = len(*m.CPUManagerPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EvictionHard != nil {
+ l = m.EvictionHard.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EvictionMaxPodGracePeriod != nil {
+ n += 1 + sovGenerated(uint64(*m.EvictionMaxPodGracePeriod))
+ }
+ if m.EvictionMinimumReclaim != nil {
+ l = m.EvictionMinimumReclaim.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EvictionPressureTransitionPeriod != nil {
+ l = m.EvictionPressureTransitionPeriod.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EvictionSoft != nil {
+ l = m.EvictionSoft.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EvictionSoftGracePeriod != nil {
+ l = m.EvictionSoftGracePeriod.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MaxPods != nil {
+ n += 1 + sovGenerated(uint64(*m.MaxPods))
+ }
+ if m.PodPIDsLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.PodPIDsLimit))
+ }
+ if m.ImagePullProgressDeadline != nil {
+ l = m.ImagePullProgressDeadline.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.FailSwapOn != nil {
+ n += 2
+ }
+ if m.KubeReserved != nil {
+ l = m.KubeReserved.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SystemReserved != nil {
+ l = m.SystemReserved.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeletConfigEviction) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MemoryAvailable != nil {
+ l = len(*m.MemoryAvailable)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageFSAvailable != nil {
+ l = len(*m.ImageFSAvailable)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageFSInodesFree != nil {
+ l = len(*m.ImageFSInodesFree)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeFSAvailable != nil {
+ l = len(*m.NodeFSAvailable)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeFSInodesFree != nil {
+ l = len(*m.NodeFSInodesFree)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeletConfigEvictionMinimumReclaim) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MemoryAvailable != nil {
+ l = m.MemoryAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageFSAvailable != nil {
+ l = m.ImageFSAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageFSInodesFree != nil {
+ l = m.ImageFSInodesFree.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeFSAvailable != nil {
+ l = m.NodeFSAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeFSInodesFree != nil {
+ l = m.NodeFSInodesFree.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeletConfigEvictionSoftGracePeriod) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MemoryAvailable != nil {
+ l = m.MemoryAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageFSAvailable != nil {
+ l = m.ImageFSAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageFSInodesFree != nil {
+ l = m.ImageFSInodesFree.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeFSAvailable != nil {
+ l = m.NodeFSAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeFSInodesFree != nil {
+ l = m.NodeFSInodesFree.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeletConfigReserved) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CPU != nil {
+ l = m.CPU.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Memory != nil {
+ l = m.Memory.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EphemeralStorage != nil {
+ l = m.EphemeralStorage.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.PID != nil {
+ l = m.PID.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Kubernetes) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AllowPrivilegedContainers != nil {
+ n += 2
+ }
+ if m.ClusterAutoscaler != nil {
+ l = m.ClusterAutoscaler.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.KubeAPIServer != nil {
+ l = m.KubeAPIServer.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.KubeControllerManager != nil {
+ l = m.KubeControllerManager.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.KubeScheduler != nil {
+ l = m.KubeScheduler.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.KubeProxy != nil {
+ l = m.KubeProxy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Kubelet != nil {
+ l = m.Kubelet.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.VerticalPodAutoscaler != nil {
+ l = m.VerticalPodAutoscaler.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubernetesConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.FeatureGates) > 0 {
+ for k, v := range m.FeatureGates {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *KubernetesDashboard) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AuthenticationMode != nil {
+ l = len(*m.AuthenticationMode)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Addon.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *KubernetesInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *KubernetesSettings) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Versions) > 0 {
+ for _, e := range m.Versions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LastError) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Description)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.TaskID != nil {
+ l = len(*m.TaskID)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Codes) > 0 {
+ for _, s := range m.Codes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.LastUpdateTime != nil {
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *LastOperation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Description)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Progress))
+ l = len(m.State)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Machine) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Image != nil {
+ l = m.Image.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *MachineControllerManagerSettings) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MachineDrainTimeout != nil {
+ l = m.MachineDrainTimeout.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MachineHealthTimeout != nil {
+ l = m.MachineHealthTimeout.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MachineCreationTimeout != nil {
+ l = m.MachineCreationTimeout.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MaxEvictRetries != nil {
+ n += 1 + sovGenerated(uint64(*m.MaxEvictRetries))
+ }
+ if len(m.NodeConditions) > 0 {
+ for _, s := range m.NodeConditions {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MachineImage) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Versions) > 0 {
+ for _, e := range m.Versions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MachineImageVersion) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ExpirableVersion.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.CRI) > 0 {
+ for _, e := range m.CRI {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MachineType) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.CPU.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.GPU.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Memory.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Storage != nil {
+ l = m.Storage.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Usable != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *MachineTypeStorage) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Class)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.StorageSize.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Maintenance) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AutoUpdate != nil {
+ l = m.AutoUpdate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.TimeWindow != nil {
+ l = m.TimeWindow.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ConfineSpecUpdateRollout != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *MaintenanceAutoUpdate) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ n += 2
+ return n
+}
+
+func (m *MaintenanceTimeWindow) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Begin)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.End)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Monitoring) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Alerting != nil {
+ l = m.Alerting.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *NamedResourceReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.ResourceRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Networking) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Pods != nil {
+ l = len(*m.Pods)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Nodes != nil {
+ l = len(*m.Nodes)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Services != nil {
+ l = len(*m.Services)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *NginxIngress) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.LoadBalancerSourceRanges) > 0 {
+ for _, s := range m.LoadBalancerSourceRanges {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Config) > 0 {
+ for k, v := range m.Config {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.ExternalTrafficPolicy != nil {
+ l = len(*m.ExternalTrafficPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Addon.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *OIDCConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CABundle != nil {
+ l = len(*m.CABundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ClientAuthentication != nil {
+ l = m.ClientAuthentication.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ClientID != nil {
+ l = len(*m.ClientID)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GroupsClaim != nil {
+ l = len(*m.GroupsClaim)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GroupsPrefix != nil {
+ l = len(*m.GroupsPrefix)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.IssuerURL != nil {
+ l = len(*m.IssuerURL)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.RequiredClaims) > 0 {
+ for k, v := range m.RequiredClaims {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.SigningAlgs) > 0 {
+ for _, s := range m.SigningAlgs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.UsernameClaim != nil {
+ l = len(*m.UsernameClaim)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.UsernamePrefix != nil {
+ l = len(*m.UsernamePrefix)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *OpenIDConnectClientAuthentication) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ExtraConfig) > 0 {
+ for k, v := range m.ExtraConfig {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.Secret != nil {
+ l = len(*m.Secret)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Plant) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PlantList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PlantSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Endpoints) > 0 {
+ for _, e := range m.Endpoints {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PlantStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.ObservedGeneration != nil {
+ n += 1 + sovGenerated(uint64(*m.ObservedGeneration))
+ }
+ if m.ClusterInfo != nil {
+ l = m.ClusterInfo.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Project) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ProjectList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ProjectMember) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Subject.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Role)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ProjectSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CreatedBy != nil {
+ l = m.CreatedBy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Description != nil {
+ l = len(*m.Description)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Owner != nil {
+ l = m.Owner.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Purpose != nil {
+ l = len(*m.Purpose)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Namespace != nil {
+ l = len(*m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Tolerations != nil {
+ l = m.Tolerations.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ProjectStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ l = len(m.Phase)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.StaleSinceTimestamp != nil {
+ l = m.StaleSinceTimestamp.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.StaleAutoDeleteTimestamp != nil {
+ l = m.StaleAutoDeleteTimestamp.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ProjectTolerations) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Defaults) > 0 {
+ for _, e := range m.Defaults {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Whitelist) > 0 {
+ for _, e := range m.Whitelist {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Provider) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ControlPlaneConfig != nil {
+ l = m.ControlPlaneConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.InfrastructureConfig != nil {
+ l = m.InfrastructureConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Workers) > 0 {
+ for _, e := range m.Workers {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Quota) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *QuotaList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *QuotaSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ClusterLifetimeDays != nil {
+ n += 1 + sovGenerated(uint64(*m.ClusterLifetimeDays))
+ }
+ if len(m.Metrics) > 0 {
+ for k, v := range m.Metrics {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = m.Scope.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Region) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Zones) > 0 {
+ for _, e := range m.Zones {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *ResourceData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.CrossVersionObjectReference.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Data.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceWatchCacheSize) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.APIGroup != nil {
+ l = len(*m.APIGroup)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Resource)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.CacheSize))
+ return n
+}
+
+func (m *SecretBinding) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Quotas) > 0 {
+ for _, e := range m.Quotas {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SecretBindingList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Seed) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SeedBackup) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Provider)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Region != nil {
+ l = len(*m.Region)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SeedDNS) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.IngressDomain != nil {
+ l = len(*m.IngressDomain)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Provider != nil {
+ l = m.Provider.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SeedDNSProvider) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Domains != nil {
+ l = m.Domains.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Zones != nil {
+ l = m.Zones.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SeedList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SeedNetworks) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Nodes != nil {
+ l = len(*m.Nodes)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Pods)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Services)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ShootDefaults != nil {
+ l = m.ShootDefaults.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SeedProvider) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Region)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SeedSelector) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LabelSelector != nil {
+ l = m.LabelSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.ProviderTypes) > 0 {
+ for _, s := range m.ProviderTypes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SeedSettingExcessCapacityReservation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
+func (m *SeedSettingLoadBalancerServices) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Annotations) > 0 {
+ for k, v := range m.Annotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *SeedSettingScheduling) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
+func (m *SeedSettingShootDNS) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
+func (m *SeedSettingVerticalPodAutoscaler) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
+func (m *SeedSettings) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ExcessCapacityReservation != nil {
+ l = m.ExcessCapacityReservation.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Scheduling != nil {
+ l = m.Scheduling.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ShootDNS != nil {
+ l = m.ShootDNS.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.LoadBalancerServices != nil {
+ l = m.LoadBalancerServices.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.VerticalPodAutoscaler != nil {
+ l = m.VerticalPodAutoscaler.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SeedSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Backup != nil {
+ l = m.Backup.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.BlockCIDRs) > 0 {
+ for _, s := range m.BlockCIDRs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.DNS.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Networks.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Provider.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SecretRef != nil {
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Taints) > 0 {
+ for _, e := range m.Taints {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Volume != nil {
+ l = m.Volume.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Settings != nil {
+ l = m.Settings.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Ingress != nil {
+ l = m.Ingress.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SeedStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Gardener != nil {
+ l = m.Gardener.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.KubernetesVersion != nil {
+ l = len(*m.KubernetesVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ if m.ClusterIdentity != nil {
+ l = len(*m.ClusterIdentity)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SeedTaint) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Value != nil {
+ l = len(*m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SeedVolume) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MinimumSize != nil {
+ l = m.MinimumSize.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Providers) > 0 {
+ for _, e := range m.Providers {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SeedVolumeProvider) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Purpose)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ServiceAccountConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Issuer != nil {
+ l = len(*m.Issuer)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SigningKeySecret != nil {
+ l = m.SigningKeySecret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Shoot) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ShootList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ShootMachineImage) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Version != nil {
+ l = len(*m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ShootNetworks) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Pods != nil {
+ l = len(*m.Pods)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Services != nil {
+ l = len(*m.Services)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ShootSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Addons != nil {
+ l = m.Addons.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.CloudProfileName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.DNS != nil {
+ l = m.DNS.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Extensions) > 0 {
+ for _, e := range m.Extensions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Hibernation != nil {
+ l = m.Hibernation.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Kubernetes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Networking.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Maintenance != nil {
+ l = m.Maintenance.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Monitoring != nil {
+ l = m.Monitoring.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Provider.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Purpose != nil {
+ l = len(*m.Purpose)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Region)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.SecretBindingName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SeedName != nil {
+ l = len(*m.SeedName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SeedSelector != nil {
+ l = m.SeedSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Resources) > 0 {
+ for _, e := range m.Resources {
+ l = e.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Tolerations) > 0 {
+ for _, e := range m.Tolerations {
+ l = e.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ShootState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ShootStateList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ShootStateSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Gardener) > 0 {
+ for _, e := range m.Gardener {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Extensions) > 0 {
+ for _, e := range m.Extensions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Resources) > 0 {
+ for _, e := range m.Resources {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ShootStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Constraints) > 0 {
+ for _, e := range m.Constraints {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.Gardener.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ if m.LastOperation != nil {
+ l = m.LastOperation.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.LastError != nil {
+ l = m.LastError.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.LastErrors) > 0 {
+ for _, e := range m.LastErrors {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ if m.RetryCycleStartTime != nil {
+ l = m.RetryCycleStartTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Seed != nil {
+ l = len(*m.Seed)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.TechnicalID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ClusterIdentity != nil {
+ l = len(*m.ClusterIdentity)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Toleration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Value != nil {
+ l = len(*m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *VerticalPodAutoscaler) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ if m.EvictAfterOOMThreshold != nil {
+ l = m.EvictAfterOOMThreshold.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EvictionRateBurst != nil {
+ n += 1 + sovGenerated(uint64(*m.EvictionRateBurst))
+ }
+ if m.EvictionRateLimit != nil {
+ n += 9
+ }
+ if m.EvictionTolerance != nil {
+ n += 9
+ }
+ if m.RecommendationMarginFraction != nil {
+ n += 9
+ }
+ if m.UpdaterInterval != nil {
+ l = m.UpdaterInterval.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.RecommenderInterval != nil {
+ l = m.RecommenderInterval.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Volume) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Name != nil {
+ l = len(*m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Type != nil {
+ l = len(*m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.VolumeSize)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Encrypted != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *VolumeType) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Class)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Usable != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *WatchCacheSizes) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Default != nil {
+ n += 1 + sovGenerated(uint64(*m.Default))
+ }
+ if len(m.Resources) > 0 {
+ for _, e := range m.Resources {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Worker) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Annotations) > 0 {
+ for k, v := range m.Annotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.CABundle != nil {
+ l = len(*m.CABundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.CRI != nil {
+ l = m.CRI.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Kubernetes != nil {
+ l = m.Kubernetes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Machine.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Maximum))
+ n += 1 + sovGenerated(uint64(m.Minimum))
+ if m.MaxSurge != nil {
+ l = m.MaxSurge.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MaxUnavailable != nil {
+ l = m.MaxUnavailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Taints) > 0 {
+ for _, e := range m.Taints {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Volume != nil {
+ l = m.Volume.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.DataVolumes) > 0 {
+ for _, e := range m.DataVolumes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.KubeletDataVolumeName != nil {
+ l = len(*m.KubeletDataVolumeName)
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Zones) > 0 {
+ for _, s := range m.Zones {
+ l = len(s)
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.SystemComponents != nil {
+ l = m.SystemComponents.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ if m.MachineControllerManagerSettings != nil {
+ l = m.MachineControllerManagerSettings.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *WorkerKubernetes) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Kubelet != nil {
+ l = m.Kubelet.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *WorkerSystemComponents) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
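+// sovGenerated returns the number of bytes required to encode x as an
+// unsigned protobuf varint (one byte per 7 bits of payload).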
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
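+// sozGenerated returns the varint-encoded size of x after zigzag encoding,
+// as used for signed integer fields.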
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
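+// String renders the value as a single-line, Go-like literal for debugging
+// and log output; the String methods below all follow the same pattern.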
+func (this *Addon) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Addon{`,
+ `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Addons) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Addons{`,
+ `KubernetesDashboard:` + strings.Replace(this.KubernetesDashboard.String(), "KubernetesDashboard", "KubernetesDashboard", 1) + `,`,
+ `NginxIngress:` + strings.Replace(this.NginxIngress.String(), "NginxIngress", "NginxIngress", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AdmissionPlugin) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AdmissionPlugin{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Alerting) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Alerting{`,
+ `EmailReceivers:` + fmt.Sprintf("%v", this.EmailReceivers) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AuditConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AuditConfig{`,
+ `AuditPolicy:` + strings.Replace(this.AuditPolicy.String(), "AuditPolicy", "AuditPolicy", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AuditPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AuditPolicy{`,
+ `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ObjectReference", "v1.ObjectReference", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AvailabilityZone) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AvailabilityZone{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `UnavailableMachineTypes:` + fmt.Sprintf("%v", this.UnavailableMachineTypes) + `,`,
+ `UnavailableVolumeTypes:` + fmt.Sprintf("%v", this.UnavailableVolumeTypes) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupBucket) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupBucket{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BackupBucketSpec", "BackupBucketSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BackupBucketStatus", "BackupBucketStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupBucketList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]BackupBucket{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BackupBucket", "BackupBucket", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&BackupBucketList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupBucketProvider) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupBucketProvider{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Region:` + fmt.Sprintf("%v", this.Region) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupBucketSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupBucketSpec{`,
+ `Provider:` + strings.Replace(strings.Replace(this.Provider.String(), "BackupBucketProvider", "BackupBucketProvider", 1), `&`, ``, 1) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`,
+ `Seed:` + valueToStringGenerated(this.Seed) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupBucketStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupBucketStatus{`,
+ `ProviderStatus:` + strings.Replace(fmt.Sprintf("%v", this.ProviderStatus), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `LastOperation:` + strings.Replace(this.LastOperation.String(), "LastOperation", "LastOperation", 1) + `,`,
+ `LastError:` + strings.Replace(this.LastError.String(), "LastError", "LastError", 1) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `GeneratedSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.GeneratedSecretRef), "SecretReference", "v1.SecretReference", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupEntry) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupEntry{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BackupEntrySpec", "BackupEntrySpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BackupEntryStatus", "BackupEntryStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupEntryList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]BackupEntry{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BackupEntry", "BackupEntry", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&BackupEntryList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupEntrySpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupEntrySpec{`,
+ `BucketName:` + fmt.Sprintf("%v", this.BucketName) + `,`,
+ `Seed:` + valueToStringGenerated(this.Seed) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupEntryStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupEntryStatus{`,
+ `LastOperation:` + strings.Replace(this.LastOperation.String(), "LastOperation", "LastOperation", 1) + `,`,
+ `LastError:` + strings.Replace(this.LastError.String(), "LastError", "LastError", 1) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CRI) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForContainerRuntimes := "[]ContainerRuntime{"
+ for _, f := range this.ContainerRuntimes {
+ repeatedStringForContainerRuntimes += strings.Replace(strings.Replace(f.String(), "ContainerRuntime", "ContainerRuntime", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForContainerRuntimes += "}"
+ s := strings.Join([]string{`&CRI{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `ContainerRuntimes:` + repeatedStringForContainerRuntimes + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CloudInfo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CloudInfo{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Region:` + fmt.Sprintf("%v", this.Region) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CloudProfile) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CloudProfile{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CloudProfileSpec", "CloudProfileSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CloudProfileList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]CloudProfile{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CloudProfile", "CloudProfile", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&CloudProfileList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CloudProfileSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForMachineImages := "[]MachineImage{"
+ for _, f := range this.MachineImages {
+ repeatedStringForMachineImages += strings.Replace(strings.Replace(f.String(), "MachineImage", "MachineImage", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMachineImages += "}"
+ repeatedStringForMachineTypes := "[]MachineType{"
+ for _, f := range this.MachineTypes {
+ repeatedStringForMachineTypes += strings.Replace(strings.Replace(f.String(), "MachineType", "MachineType", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMachineTypes += "}"
+ repeatedStringForRegions := "[]Region{"
+ for _, f := range this.Regions {
+ repeatedStringForRegions += strings.Replace(strings.Replace(f.String(), "Region", "Region", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForRegions += "}"
+ repeatedStringForVolumeTypes := "[]VolumeType{"
+ for _, f := range this.VolumeTypes {
+ repeatedStringForVolumeTypes += strings.Replace(strings.Replace(f.String(), "VolumeType", "VolumeType", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVolumeTypes += "}"
+ s := strings.Join([]string{`&CloudProfileSpec{`,
+ `CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
+ `Kubernetes:` + strings.Replace(strings.Replace(this.Kubernetes.String(), "KubernetesSettings", "KubernetesSettings", 1), `&`, ``, 1) + `,`,
+ `MachineImages:` + repeatedStringForMachineImages + `,`,
+ `MachineTypes:` + repeatedStringForMachineTypes + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Regions:` + repeatedStringForRegions + `,`,
+ `SeedSelector:` + strings.Replace(this.SeedSelector.String(), "SeedSelector", "SeedSelector", 1) + `,`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `VolumeTypes:` + repeatedStringForVolumeTypes + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterAutoscaler) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterAutoscaler{`,
+ `ScaleDownDelayAfterAdd:` + strings.Replace(fmt.Sprintf("%v", this.ScaleDownDelayAfterAdd), "Duration", "v11.Duration", 1) + `,`,
+ `ScaleDownDelayAfterDelete:` + strings.Replace(fmt.Sprintf("%v", this.ScaleDownDelayAfterDelete), "Duration", "v11.Duration", 1) + `,`,
+ `ScaleDownDelayAfterFailure:` + strings.Replace(fmt.Sprintf("%v", this.ScaleDownDelayAfterFailure), "Duration", "v11.Duration", 1) + `,`,
+ `ScaleDownUnneededTime:` + strings.Replace(fmt.Sprintf("%v", this.ScaleDownUnneededTime), "Duration", "v11.Duration", 1) + `,`,
+ `ScaleDownUtilizationThreshold:` + valueToStringGenerated(this.ScaleDownUtilizationThreshold) + `,`,
+ `ScanInterval:` + strings.Replace(fmt.Sprintf("%v", this.ScanInterval), "Duration", "v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterInfo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterInfo{`,
+ `Cloud:` + strings.Replace(strings.Replace(this.Cloud.String(), "CloudInfo", "CloudInfo", 1), `&`, ``, 1) + `,`,
+ `Kubernetes:` + strings.Replace(strings.Replace(this.Kubernetes.String(), "KubernetesInfo", "KubernetesInfo", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Condition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Condition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`,
+ `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `Codes:` + fmt.Sprintf("%v", this.Codes) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ContainerRuntime) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ContainerRuntime{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerDeployment) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ControllerDeployment{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Policy:` + valueToStringGenerated(this.Policy) + `,`,
+ `SeedSelector:` + strings.Replace(fmt.Sprintf("%v", this.SeedSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerInstallation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ControllerInstallation{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ControllerInstallationSpec", "ControllerInstallationSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ControllerInstallationStatus", "ControllerInstallationStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerInstallationList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ControllerInstallation{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerInstallation", "ControllerInstallation", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ControllerInstallationList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerInstallationSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ControllerInstallationSpec{`,
+ `RegistrationRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RegistrationRef), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `SeedRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SeedRef), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerInstallationStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&ControllerInstallationStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `ProviderStatus:` + strings.Replace(fmt.Sprintf("%v", this.ProviderStatus), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerRegistration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ControllerRegistration{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ControllerRegistrationSpec", "ControllerRegistrationSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerRegistrationList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ControllerRegistration{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRegistration", "ControllerRegistration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ControllerRegistrationList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerRegistrationSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForResources := "[]ControllerResource{"
+ for _, f := range this.Resources {
+ repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ControllerResource", "ControllerResource", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResources += "}"
+ s := strings.Join([]string{`&ControllerRegistrationSpec{`,
+ `Resources:` + repeatedStringForResources + `,`,
+ `Deployment:` + strings.Replace(this.Deployment.String(), "ControllerDeployment", "ControllerDeployment", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerResource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ControllerResource{`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `GloballyEnabled:` + valueToStringGenerated(this.GloballyEnabled) + `,`,
+ `ReconcileTimeout:` + strings.Replace(fmt.Sprintf("%v", this.ReconcileTimeout), "Duration", "v11.Duration", 1) + `,`,
+ `Primary:` + valueToStringGenerated(this.Primary) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DNS) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForProviders := "[]DNSProvider{"
+ for _, f := range this.Providers {
+ repeatedStringForProviders += strings.Replace(strings.Replace(f.String(), "DNSProvider", "DNSProvider", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForProviders += "}"
+ s := strings.Join([]string{`&DNS{`,
+ `Domain:` + valueToStringGenerated(this.Domain) + `,`,
+ `Providers:` + repeatedStringForProviders + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DNSIncludeExclude) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DNSIncludeExclude{`,
+ `Include:` + fmt.Sprintf("%v", this.Include) + `,`,
+ `Exclude:` + fmt.Sprintf("%v", this.Exclude) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DNSProvider) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DNSProvider{`,
+ `Domains:` + strings.Replace(this.Domains.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`,
+ `Primary:` + valueToStringGenerated(this.Primary) + `,`,
+ `SecretName:` + valueToStringGenerated(this.SecretName) + `,`,
+ `Type:` + valueToStringGenerated(this.Type) + `,`,
+ `Zones:` + strings.Replace(this.Zones.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DataVolume) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DataVolume{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Type:` + valueToStringGenerated(this.Type) + `,`,
+ `VolumeSize:` + fmt.Sprintf("%v", this.VolumeSize) + `,`,
+ `Encrypted:` + valueToStringGenerated(this.Encrypted) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Endpoint) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Endpoint{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `URL:` + fmt.Sprintf("%v", this.URL) + `,`,
+ `Purpose:` + fmt.Sprintf("%v", this.Purpose) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ExpirableVersion) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ExpirableVersion{`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `ExpirationDate:` + strings.Replace(fmt.Sprintf("%v", this.ExpirationDate), "Time", "v11.Time", 1) + `,`,
+ `Classification:` + valueToStringGenerated(this.Classification) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Extension) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Extension{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Disabled:` + valueToStringGenerated(this.Disabled) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ExtensionResourceState) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForResources := "[]NamedResourceReference{"
+ for _, f := range this.Resources {
+ repeatedStringForResources += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForResources += "}"
+ s := strings.Join([]string{`&ExtensionResourceState{`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `Name:` + valueToStringGenerated(this.Name) + `,`,
+ `Purpose:` + valueToStringGenerated(this.Purpose) + `,`,
+ `State:` + strings.Replace(fmt.Sprintf("%v", this.State), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Resources:` + repeatedStringForResources + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Gardener) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Gardener{`,
+ `ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GardenerResourceData) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GardenerResourceData{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Hibernation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSchedules := "[]HibernationSchedule{"
+ for _, f := range this.Schedules {
+ repeatedStringForSchedules += strings.Replace(strings.Replace(f.String(), "HibernationSchedule", "HibernationSchedule", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSchedules += "}"
+ s := strings.Join([]string{`&Hibernation{`,
+ `Enabled:` + valueToStringGenerated(this.Enabled) + `,`,
+ `Schedules:` + repeatedStringForSchedules + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *HibernationSchedule) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&HibernationSchedule{`,
+ `Start:` + valueToStringGenerated(this.Start) + `,`,
+ `End:` + valueToStringGenerated(this.End) + `,`,
+ `Location:` + valueToStringGenerated(this.Location) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *HorizontalPodAutoscalerConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&HorizontalPodAutoscalerConfig{`,
+ `CPUInitializationPeriod:` + strings.Replace(fmt.Sprintf("%v", this.CPUInitializationPeriod), "Duration", "v11.Duration", 1) + `,`,
+ `DownscaleDelay:` + strings.Replace(fmt.Sprintf("%v", this.DownscaleDelay), "Duration", "v11.Duration", 1) + `,`,
+ `DownscaleStabilization:` + strings.Replace(fmt.Sprintf("%v", this.DownscaleStabilization), "Duration", "v11.Duration", 1) + `,`,
+ `InitialReadinessDelay:` + strings.Replace(fmt.Sprintf("%v", this.InitialReadinessDelay), "Duration", "v11.Duration", 1) + `,`,
+ `SyncPeriod:` + strings.Replace(fmt.Sprintf("%v", this.SyncPeriod), "Duration", "v11.Duration", 1) + `,`,
+ `Tolerance:` + valueToStringGenerated(this.Tolerance) + `,`,
+ `UpscaleDelay:` + strings.Replace(fmt.Sprintf("%v", this.UpscaleDelay), "Duration", "v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Ingress) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Ingress{`,
+ `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`,
+ `Controller:` + strings.Replace(strings.Replace(this.Controller.String(), "IngressController", "IngressController", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressController) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IngressController{`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
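+// String sorts the RuntimeConfig map keys before rendering so the output is
+// deterministic across runs.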
+func (this *KubeAPIServerConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForAdmissionPlugins := "[]AdmissionPlugin{"
+ for _, f := range this.AdmissionPlugins {
+ repeatedStringForAdmissionPlugins += strings.Replace(strings.Replace(f.String(), "AdmissionPlugin", "AdmissionPlugin", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForAdmissionPlugins += "}"
+ keysForRuntimeConfig := make([]string, 0, len(this.RuntimeConfig))
+ for k := range this.RuntimeConfig {
+ keysForRuntimeConfig = append(keysForRuntimeConfig, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForRuntimeConfig)
+ mapStringForRuntimeConfig := "map[string]bool{"
+ for _, k := range keysForRuntimeConfig {
+ mapStringForRuntimeConfig += fmt.Sprintf("%v: %v,", k, this.RuntimeConfig[k])
+ }
+ mapStringForRuntimeConfig += "}"
+ s := strings.Join([]string{`&KubeAPIServerConfig{`,
+ `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`,
+ `AdmissionPlugins:` + repeatedStringForAdmissionPlugins + `,`,
+ `APIAudiences:` + fmt.Sprintf("%v", this.APIAudiences) + `,`,
+ `AuditConfig:` + strings.Replace(this.AuditConfig.String(), "AuditConfig", "AuditConfig", 1) + `,`,
+ `EnableBasicAuthentication:` + valueToStringGenerated(this.EnableBasicAuthentication) + `,`,
+ `OIDCConfig:` + strings.Replace(this.OIDCConfig.String(), "OIDCConfig", "OIDCConfig", 1) + `,`,
+ `RuntimeConfig:` + mapStringForRuntimeConfig + `,`,
+ `ServiceAccountConfig:` + strings.Replace(this.ServiceAccountConfig.String(), "ServiceAccountConfig", "ServiceAccountConfig", 1) + `,`,
+ `WatchCacheSizes:` + strings.Replace(this.WatchCacheSizes.String(), "WatchCacheSizes", "WatchCacheSizes", 1) + `,`,
+ `Requests:` + strings.Replace(this.Requests.String(), "KubeAPIServerRequests", "KubeAPIServerRequests", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeAPIServerRequests) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeAPIServerRequests{`,
+ `MaxNonMutatingInflight:` + valueToStringGenerated(this.MaxNonMutatingInflight) + `,`,
+ `MaxMutatingInflight:` + valueToStringGenerated(this.MaxMutatingInflight) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeControllerManagerConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeControllerManagerConfig{`,
+ `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`,
+ `HorizontalPodAutoscalerConfig:` + strings.Replace(this.HorizontalPodAutoscalerConfig.String(), "HorizontalPodAutoscalerConfig", "HorizontalPodAutoscalerConfig", 1) + `,`,
+ `NodeCIDRMaskSize:` + valueToStringGenerated(this.NodeCIDRMaskSize) + `,`,
+ `PodEvictionTimeout:` + strings.Replace(fmt.Sprintf("%v", this.PodEvictionTimeout), "Duration", "v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeProxyConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeProxyConfig{`,
+ `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`,
+ `Mode:` + valueToStringGenerated(this.Mode) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeSchedulerConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeSchedulerConfig{`,
+ `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`,
+ `KubeMaxPDVols:` + valueToStringGenerated(this.KubeMaxPDVols) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeletConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeletConfig{`,
+ `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`,
+ `CPUCFSQuota:` + valueToStringGenerated(this.CPUCFSQuota) + `,`,
+ `CPUManagerPolicy:` + valueToStringGenerated(this.CPUManagerPolicy) + `,`,
+ `EvictionHard:` + strings.Replace(this.EvictionHard.String(), "KubeletConfigEviction", "KubeletConfigEviction", 1) + `,`,
+ `EvictionMaxPodGracePeriod:` + valueToStringGenerated(this.EvictionMaxPodGracePeriod) + `,`,
+ `EvictionMinimumReclaim:` + strings.Replace(this.EvictionMinimumReclaim.String(), "KubeletConfigEvictionMinimumReclaim", "KubeletConfigEvictionMinimumReclaim", 1) + `,`,
+ `EvictionPressureTransitionPeriod:` + strings.Replace(fmt.Sprintf("%v", this.EvictionPressureTransitionPeriod), "Duration", "v11.Duration", 1) + `,`,
+ `EvictionSoft:` + strings.Replace(this.EvictionSoft.String(), "KubeletConfigEviction", "KubeletConfigEviction", 1) + `,`,
+ `EvictionSoftGracePeriod:` + strings.Replace(this.EvictionSoftGracePeriod.String(), "KubeletConfigEvictionSoftGracePeriod", "KubeletConfigEvictionSoftGracePeriod", 1) + `,`,
+ `MaxPods:` + valueToStringGenerated(this.MaxPods) + `,`,
+ `PodPIDsLimit:` + valueToStringGenerated(this.PodPIDsLimit) + `,`,
+ `ImagePullProgressDeadline:` + strings.Replace(fmt.Sprintf("%v", this.ImagePullProgressDeadline), "Duration", "v11.Duration", 1) + `,`,
+ `FailSwapOn:` + valueToStringGenerated(this.FailSwapOn) + `,`,
+ `KubeReserved:` + strings.Replace(this.KubeReserved.String(), "KubeletConfigReserved", "KubeletConfigReserved", 1) + `,`,
+ `SystemReserved:` + strings.Replace(this.SystemReserved.String(), "KubeletConfigReserved", "KubeletConfigReserved", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeletConfigEviction) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeletConfigEviction{`,
+ `MemoryAvailable:` + valueToStringGenerated(this.MemoryAvailable) + `,`,
+ `ImageFSAvailable:` + valueToStringGenerated(this.ImageFSAvailable) + `,`,
+ `ImageFSInodesFree:` + valueToStringGenerated(this.ImageFSInodesFree) + `,`,
+ `NodeFSAvailable:` + valueToStringGenerated(this.NodeFSAvailable) + `,`,
+ `NodeFSInodesFree:` + valueToStringGenerated(this.NodeFSInodesFree) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeletConfigEvictionMinimumReclaim) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeletConfigEvictionMinimumReclaim{`,
+ `MemoryAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MemoryAvailable), "Quantity", "resource.Quantity", 1) + `,`,
+ `ImageFSAvailable:` + strings.Replace(fmt.Sprintf("%v", this.ImageFSAvailable), "Quantity", "resource.Quantity", 1) + `,`,
+ `ImageFSInodesFree:` + strings.Replace(fmt.Sprintf("%v", this.ImageFSInodesFree), "Quantity", "resource.Quantity", 1) + `,`,
+ `NodeFSAvailable:` + strings.Replace(fmt.Sprintf("%v", this.NodeFSAvailable), "Quantity", "resource.Quantity", 1) + `,`,
+ `NodeFSInodesFree:` + strings.Replace(fmt.Sprintf("%v", this.NodeFSInodesFree), "Quantity", "resource.Quantity", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeletConfigEvictionSoftGracePeriod) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeletConfigEvictionSoftGracePeriod{`,
+ `MemoryAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MemoryAvailable), "Duration", "v11.Duration", 1) + `,`,
+ `ImageFSAvailable:` + strings.Replace(fmt.Sprintf("%v", this.ImageFSAvailable), "Duration", "v11.Duration", 1) + `,`,
+ `ImageFSInodesFree:` + strings.Replace(fmt.Sprintf("%v", this.ImageFSInodesFree), "Duration", "v11.Duration", 1) + `,`,
+ `NodeFSAvailable:` + strings.Replace(fmt.Sprintf("%v", this.NodeFSAvailable), "Duration", "v11.Duration", 1) + `,`,
+ `NodeFSInodesFree:` + strings.Replace(fmt.Sprintf("%v", this.NodeFSInodesFree), "Duration", "v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeletConfigReserved) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeletConfigReserved{`,
+ `CPU:` + strings.Replace(fmt.Sprintf("%v", this.CPU), "Quantity", "resource.Quantity", 1) + `,`,
+ `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "Quantity", "resource.Quantity", 1) + `,`,
+ `EphemeralStorage:` + strings.Replace(fmt.Sprintf("%v", this.EphemeralStorage), "Quantity", "resource.Quantity", 1) + `,`,
+ `PID:` + strings.Replace(fmt.Sprintf("%v", this.PID), "Quantity", "resource.Quantity", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Kubernetes) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Kubernetes{`,
+ `AllowPrivilegedContainers:` + valueToStringGenerated(this.AllowPrivilegedContainers) + `,`,
+ `ClusterAutoscaler:` + strings.Replace(this.ClusterAutoscaler.String(), "ClusterAutoscaler", "ClusterAutoscaler", 1) + `,`,
+ `KubeAPIServer:` + strings.Replace(this.KubeAPIServer.String(), "KubeAPIServerConfig", "KubeAPIServerConfig", 1) + `,`,
+ `KubeControllerManager:` + strings.Replace(this.KubeControllerManager.String(), "KubeControllerManagerConfig", "KubeControllerManagerConfig", 1) + `,`,
+ `KubeScheduler:` + strings.Replace(this.KubeScheduler.String(), "KubeSchedulerConfig", "KubeSchedulerConfig", 1) + `,`,
+ `KubeProxy:` + strings.Replace(this.KubeProxy.String(), "KubeProxyConfig", "KubeProxyConfig", 1) + `,`,
+ `Kubelet:` + strings.Replace(this.Kubelet.String(), "KubeletConfig", "KubeletConfig", 1) + `,`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `VerticalPodAutoscaler:` + strings.Replace(this.VerticalPodAutoscaler.String(), "VerticalPodAutoscaler", "VerticalPodAutoscaler", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubernetesConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForFeatureGates := make([]string, 0, len(this.FeatureGates))
+ for k := range this.FeatureGates {
+ keysForFeatureGates = append(keysForFeatureGates, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForFeatureGates)
+ mapStringForFeatureGates := "map[string]bool{"
+ for _, k := range keysForFeatureGates {
+ mapStringForFeatureGates += fmt.Sprintf("%v: %v,", k, this.FeatureGates[k])
+ }
+ mapStringForFeatureGates += "}"
+ s := strings.Join([]string{`&KubernetesConfig{`,
+ `FeatureGates:` + mapStringForFeatureGates + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubernetesDashboard) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubernetesDashboard{`,
+ `AuthenticationMode:` + valueToStringGenerated(this.AuthenticationMode) + `,`,
+ `Addon:` + strings.Replace(strings.Replace(this.Addon.String(), "Addon", "Addon", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubernetesInfo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubernetesInfo{`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubernetesSettings) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForVersions := "[]ExpirableVersion{"
+ for _, f := range this.Versions {
+ repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "ExpirableVersion", "ExpirableVersion", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVersions += "}"
+ s := strings.Join([]string{`&KubernetesSettings{`,
+ `Versions:` + repeatedStringForVersions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LastError) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LastError{`,
+ `Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+ `TaskID:` + valueToStringGenerated(this.TaskID) + `,`,
+ `Codes:` + fmt.Sprintf("%v", this.Codes) + `,`,
+ `LastUpdateTime:` + strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LastOperation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LastOperation{`,
+ `Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+ `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`,
+ `Progress:` + fmt.Sprintf("%v", this.Progress) + `,`,
+ `State:` + fmt.Sprintf("%v", this.State) + `,`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Machine) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Machine{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Image:` + strings.Replace(this.Image.String(), "ShootMachineImage", "ShootMachineImage", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MachineControllerManagerSettings) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MachineControllerManagerSettings{`,
+ `MachineDrainTimeout:` + strings.Replace(fmt.Sprintf("%v", this.MachineDrainTimeout), "Duration", "v11.Duration", 1) + `,`,
+ `MachineHealthTimeout:` + strings.Replace(fmt.Sprintf("%v", this.MachineHealthTimeout), "Duration", "v11.Duration", 1) + `,`,
+ `MachineCreationTimeout:` + strings.Replace(fmt.Sprintf("%v", this.MachineCreationTimeout), "Duration", "v11.Duration", 1) + `,`,
+ `MaxEvictRetries:` + valueToStringGenerated(this.MaxEvictRetries) + `,`,
+ `NodeConditions:` + fmt.Sprintf("%v", this.NodeConditions) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MachineImage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForVersions := "[]MachineImageVersion{"
+ for _, f := range this.Versions {
+ repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "MachineImageVersion", "MachineImageVersion", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVersions += "}"
+ s := strings.Join([]string{`&MachineImage{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Versions:` + repeatedStringForVersions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MachineImageVersion) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForCRI := "[]CRI{"
+ for _, f := range this.CRI {
+ repeatedStringForCRI += strings.Replace(strings.Replace(f.String(), "CRI", "CRI", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForCRI += "}"
+ s := strings.Join([]string{`&MachineImageVersion{`,
+ `ExpirableVersion:` + strings.Replace(strings.Replace(this.ExpirableVersion.String(), "ExpirableVersion", "ExpirableVersion", 1), `&`, ``, 1) + `,`,
+ `CRI:` + repeatedStringForCRI + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MachineType) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MachineType{`,
+ `CPU:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CPU), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+ `GPU:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.GPU), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+ `Memory:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Memory), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Storage:` + strings.Replace(this.Storage.String(), "MachineTypeStorage", "MachineTypeStorage", 1) + `,`,
+ `Usable:` + valueToStringGenerated(this.Usable) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MachineTypeStorage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MachineTypeStorage{`,
+ `Class:` + fmt.Sprintf("%v", this.Class) + `,`,
+ `StorageSize:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StorageSize), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Maintenance) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Maintenance{`,
+ `AutoUpdate:` + strings.Replace(this.AutoUpdate.String(), "MaintenanceAutoUpdate", "MaintenanceAutoUpdate", 1) + `,`,
+ `TimeWindow:` + strings.Replace(this.TimeWindow.String(), "MaintenanceTimeWindow", "MaintenanceTimeWindow", 1) + `,`,
+ `ConfineSpecUpdateRollout:` + valueToStringGenerated(this.ConfineSpecUpdateRollout) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MaintenanceAutoUpdate) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MaintenanceAutoUpdate{`,
+ `KubernetesVersion:` + fmt.Sprintf("%v", this.KubernetesVersion) + `,`,
+ `MachineImageVersion:` + fmt.Sprintf("%v", this.MachineImageVersion) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MaintenanceTimeWindow) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MaintenanceTimeWindow{`,
+ `Begin:` + fmt.Sprintf("%v", this.Begin) + `,`,
+ `End:` + fmt.Sprintf("%v", this.End) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Monitoring) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Monitoring{`,
+ `Alerting:` + strings.Replace(this.Alerting.String(), "Alerting", "Alerting", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NamedResourceReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NamedResourceReference{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `ResourceRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRef), "CrossVersionObjectReference", "v12.CrossVersionObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Networking) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Networking{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Pods:` + valueToStringGenerated(this.Pods) + `,`,
+ `Nodes:` + valueToStringGenerated(this.Nodes) + `,`,
+ `Services:` + valueToStringGenerated(this.Services) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NginxIngress) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForConfig := make([]string, 0, len(this.Config))
+ for k := range this.Config {
+ keysForConfig = append(keysForConfig, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForConfig)
+ mapStringForConfig := "map[string]string{"
+ for _, k := range keysForConfig {
+ mapStringForConfig += fmt.Sprintf("%v: %v,", k, this.Config[k])
+ }
+ mapStringForConfig += "}"
+ s := strings.Join([]string{`&NginxIngress{`,
+ `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`,
+ `Config:` + mapStringForConfig + `,`,
+ `ExternalTrafficPolicy:` + valueToStringGenerated(this.ExternalTrafficPolicy) + `,`,
+ `Addon:` + strings.Replace(strings.Replace(this.Addon.String(), "Addon", "Addon", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OIDCConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForRequiredClaims := make([]string, 0, len(this.RequiredClaims))
+ for k := range this.RequiredClaims {
+ keysForRequiredClaims = append(keysForRequiredClaims, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForRequiredClaims)
+ mapStringForRequiredClaims := "map[string]string{"
+ for _, k := range keysForRequiredClaims {
+ mapStringForRequiredClaims += fmt.Sprintf("%v: %v,", k, this.RequiredClaims[k])
+ }
+ mapStringForRequiredClaims += "}"
+ s := strings.Join([]string{`&OIDCConfig{`,
+ `CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
+ `ClientAuthentication:` + strings.Replace(this.ClientAuthentication.String(), "OpenIDConnectClientAuthentication", "OpenIDConnectClientAuthentication", 1) + `,`,
+ `ClientID:` + valueToStringGenerated(this.ClientID) + `,`,
+ `GroupsClaim:` + valueToStringGenerated(this.GroupsClaim) + `,`,
+ `GroupsPrefix:` + valueToStringGenerated(this.GroupsPrefix) + `,`,
+ `IssuerURL:` + valueToStringGenerated(this.IssuerURL) + `,`,
+ `RequiredClaims:` + mapStringForRequiredClaims + `,`,
+ `SigningAlgs:` + fmt.Sprintf("%v", this.SigningAlgs) + `,`,
+ `UsernameClaim:` + valueToStringGenerated(this.UsernameClaim) + `,`,
+ `UsernamePrefix:` + valueToStringGenerated(this.UsernamePrefix) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OpenIDConnectClientAuthentication) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForExtraConfig := make([]string, 0, len(this.ExtraConfig))
+ for k := range this.ExtraConfig {
+ keysForExtraConfig = append(keysForExtraConfig, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForExtraConfig)
+ mapStringForExtraConfig := "map[string]string{"
+ for _, k := range keysForExtraConfig {
+ mapStringForExtraConfig += fmt.Sprintf("%v: %v,", k, this.ExtraConfig[k])
+ }
+ mapStringForExtraConfig += "}"
+ s := strings.Join([]string{`&OpenIDConnectClientAuthentication{`,
+ `ExtraConfig:` + mapStringForExtraConfig + `,`,
+ `Secret:` + valueToStringGenerated(this.Secret) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Plant) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Plant{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PlantSpec", "PlantSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PlantStatus", "PlantStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PlantList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Plant{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Plant", "Plant", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&PlantList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PlantSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEndpoints := "[]Endpoint{"
+ for _, f := range this.Endpoints {
+ repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForEndpoints += "}"
+ s := strings.Join([]string{`&PlantSpec{`,
+ `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "v1.LocalObjectReference", 1), `&`, ``, 1) + `,`,
+ `Endpoints:` + repeatedStringForEndpoints + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PlantStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&PlantStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`,
+ `ClusterInfo:` + strings.Replace(this.ClusterInfo.String(), "ClusterInfo", "ClusterInfo", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Project) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Project{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ProjectSpec", "ProjectSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ProjectStatus", "ProjectStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Project{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Project", "Project", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ProjectList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectMember) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ProjectMember{`,
+ `Subject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subject), "Subject", "v13.Subject", 1), `&`, ``, 1) + `,`,
+ `Role:` + fmt.Sprintf("%v", this.Role) + `,`,
+ `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForMembers := "[]ProjectMember{"
+ for _, f := range this.Members {
+ repeatedStringForMembers += strings.Replace(strings.Replace(f.String(), "ProjectMember", "ProjectMember", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMembers += "}"
+ s := strings.Join([]string{`&ProjectSpec{`,
+ `CreatedBy:` + strings.Replace(fmt.Sprintf("%v", this.CreatedBy), "Subject", "v13.Subject", 1) + `,`,
+ `Description:` + valueToStringGenerated(this.Description) + `,`,
+ `Owner:` + strings.Replace(fmt.Sprintf("%v", this.Owner), "Subject", "v13.Subject", 1) + `,`,
+ `Purpose:` + valueToStringGenerated(this.Purpose) + `,`,
+ `Members:` + repeatedStringForMembers + `,`,
+ `Namespace:` + valueToStringGenerated(this.Namespace) + `,`,
+ `Tolerations:` + strings.Replace(this.Tolerations.String(), "ProjectTolerations", "ProjectTolerations", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ProjectStatus{`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
+ `StaleSinceTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StaleSinceTimestamp), "Time", "v11.Time", 1) + `,`,
+ `StaleAutoDeleteTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StaleAutoDeleteTimestamp), "Time", "v11.Time", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectTolerations) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForDefaults := "[]Toleration{"
+ for _, f := range this.Defaults {
+ repeatedStringForDefaults += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForDefaults += "}"
+ repeatedStringForWhitelist := "[]Toleration{"
+ for _, f := range this.Whitelist {
+ repeatedStringForWhitelist += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForWhitelist += "}"
+ s := strings.Join([]string{`&ProjectTolerations{`,
+ `Defaults:` + repeatedStringForDefaults + `,`,
+ `Whitelist:` + repeatedStringForWhitelist + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Provider) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForWorkers := "[]Worker{"
+ for _, f := range this.Workers {
+ repeatedStringForWorkers += strings.Replace(strings.Replace(f.String(), "Worker", "Worker", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForWorkers += "}"
+ s := strings.Join([]string{`&Provider{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ControlPlaneConfig:` + strings.Replace(fmt.Sprintf("%v", this.ControlPlaneConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `InfrastructureConfig:` + strings.Replace(fmt.Sprintf("%v", this.InfrastructureConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Workers:` + repeatedStringForWorkers + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Quota) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Quota{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "QuotaSpec", "QuotaSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *QuotaList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Quota{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Quota", "Quota", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&QuotaList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *QuotaSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForMetrics := make([]string, 0, len(this.Metrics))
+ for k := range this.Metrics {
+ keysForMetrics = append(keysForMetrics, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForMetrics)
+ mapStringForMetrics := "k8s_io_api_core_v1.ResourceList{"
+ for _, k := range keysForMetrics {
+ mapStringForMetrics += fmt.Sprintf("%v: %v,", k, this.Metrics[k8s_io_api_core_v1.ResourceName(k)])
+ }
+ mapStringForMetrics += "}"
+ s := strings.Join([]string{`&QuotaSpec{`,
+ `ClusterLifetimeDays:` + valueToStringGenerated(this.ClusterLifetimeDays) + `,`,
+ `Metrics:` + mapStringForMetrics + `,`,
+ `Scope:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Scope), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Region) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForZones := "[]AvailabilityZone{"
+ for _, f := range this.Zones {
+ repeatedStringForZones += strings.Replace(strings.Replace(f.String(), "AvailabilityZone", "AvailabilityZone", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForZones += "}"
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&Region{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Zones:` + repeatedStringForZones + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceData) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceData{`,
+ `CrossVersionObjectReference:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CrossVersionObjectReference), "CrossVersionObjectReference", "v12.CrossVersionObjectReference", 1), `&`, ``, 1) + `,`,
+ `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceWatchCacheSize) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceWatchCacheSize{`,
+ `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`,
+ `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
+ `CacheSize:` + fmt.Sprintf("%v", this.CacheSize) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SecretBinding) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForQuotas := "[]ObjectReference{"
+ for _, f := range this.Quotas {
+ repeatedStringForQuotas += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForQuotas += "}"
+ s := strings.Join([]string{`&SecretBinding{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`,
+ `Quotas:` + repeatedStringForQuotas + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SecretBindingList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]SecretBinding{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "SecretBinding", "SecretBinding", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&SecretBindingList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Seed) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Seed{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SeedSpec", "SeedSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SeedStatus", "SeedStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedBackup) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedBackup{`,
+ `Provider:` + fmt.Sprintf("%v", this.Provider) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Region:` + valueToStringGenerated(this.Region) + `,`,
+ `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedDNS) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedDNS{`,
+ `IngressDomain:` + valueToStringGenerated(this.IngressDomain) + `,`,
+ `Provider:` + strings.Replace(this.Provider.String(), "SeedDNSProvider", "SeedDNSProvider", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedDNSProvider) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedDNSProvider{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`,
+ `Domains:` + strings.Replace(this.Domains.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`,
+ `Zones:` + strings.Replace(this.Zones.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Seed{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Seed", "Seed", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&SeedList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedNetworks) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedNetworks{`,
+ `Nodes:` + valueToStringGenerated(this.Nodes) + `,`,
+ `Pods:` + fmt.Sprintf("%v", this.Pods) + `,`,
+ `Services:` + fmt.Sprintf("%v", this.Services) + `,`,
+ `ShootDefaults:` + strings.Replace(this.ShootDefaults.String(), "ShootNetworks", "ShootNetworks", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedProvider) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedProvider{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Region:` + fmt.Sprintf("%v", this.Region) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSelector) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedSelector{`,
+ `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`,
+ `ProviderTypes:` + fmt.Sprintf("%v", this.ProviderTypes) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSettingExcessCapacityReservation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedSettingExcessCapacityReservation{`,
+ `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSettingLoadBalancerServices) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForAnnotations := make([]string, 0, len(this.Annotations))
+ for k := range this.Annotations {
+ keysForAnnotations = append(keysForAnnotations, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ mapStringForAnnotations := "map[string]string{"
+ for _, k := range keysForAnnotations {
+ mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+ }
+ mapStringForAnnotations += "}"
+ s := strings.Join([]string{`&SeedSettingLoadBalancerServices{`,
+ `Annotations:` + mapStringForAnnotations + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSettingScheduling) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedSettingScheduling{`,
+ `Visible:` + fmt.Sprintf("%v", this.Visible) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSettingShootDNS) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedSettingShootDNS{`,
+ `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSettingVerticalPodAutoscaler) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedSettingVerticalPodAutoscaler{`,
+ `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSettings) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedSettings{`,
+ `ExcessCapacityReservation:` + strings.Replace(this.ExcessCapacityReservation.String(), "SeedSettingExcessCapacityReservation", "SeedSettingExcessCapacityReservation", 1) + `,`,
+ `Scheduling:` + strings.Replace(this.Scheduling.String(), "SeedSettingScheduling", "SeedSettingScheduling", 1) + `,`,
+ `ShootDNS:` + strings.Replace(this.ShootDNS.String(), "SeedSettingShootDNS", "SeedSettingShootDNS", 1) + `,`,
+ `LoadBalancerServices:` + strings.Replace(this.LoadBalancerServices.String(), "SeedSettingLoadBalancerServices", "SeedSettingLoadBalancerServices", 1) + `,`,
+ `VerticalPodAutoscaler:` + strings.Replace(this.VerticalPodAutoscaler.String(), "SeedSettingVerticalPodAutoscaler", "SeedSettingVerticalPodAutoscaler", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForTaints := "[]SeedTaint{"
+ for _, f := range this.Taints {
+ repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "SeedTaint", "SeedTaint", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTaints += "}"
+ s := strings.Join([]string{`&SeedSpec{`,
+ `Backup:` + strings.Replace(this.Backup.String(), "SeedBackup", "SeedBackup", 1) + `,`,
+ `BlockCIDRs:` + fmt.Sprintf("%v", this.BlockCIDRs) + `,`,
+ `DNS:` + strings.Replace(strings.Replace(this.DNS.String(), "SeedDNS", "SeedDNS", 1), `&`, ``, 1) + `,`,
+ `Networks:` + strings.Replace(strings.Replace(this.Networks.String(), "SeedNetworks", "SeedNetworks", 1), `&`, ``, 1) + `,`,
+ `Provider:` + strings.Replace(strings.Replace(this.Provider.String(), "SeedProvider", "SeedProvider", 1), `&`, ``, 1) + `,`,
+ `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1) + `,`,
+ `Taints:` + repeatedStringForTaints + `,`,
+ `Volume:` + strings.Replace(this.Volume.String(), "SeedVolume", "SeedVolume", 1) + `,`,
+ `Settings:` + strings.Replace(this.Settings.String(), "SeedSettings", "SeedSettings", 1) + `,`,
+ `Ingress:` + strings.Replace(this.Ingress.String(), "Ingress", "Ingress", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&SeedStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `Gardener:` + strings.Replace(this.Gardener.String(), "Gardener", "Gardener", 1) + `,`,
+ `KubernetesVersion:` + valueToStringGenerated(this.KubernetesVersion) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `ClusterIdentity:` + valueToStringGenerated(this.ClusterIdentity) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedTaint) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedTaint{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Value:` + valueToStringGenerated(this.Value) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedVolume) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForProviders := "[]SeedVolumeProvider{"
+ for _, f := range this.Providers {
+ repeatedStringForProviders += strings.Replace(strings.Replace(f.String(), "SeedVolumeProvider", "SeedVolumeProvider", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForProviders += "}"
+ s := strings.Join([]string{`&SeedVolume{`,
+ `MinimumSize:` + strings.Replace(fmt.Sprintf("%v", this.MinimumSize), "Quantity", "resource.Quantity", 1) + `,`,
+ `Providers:` + repeatedStringForProviders + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedVolumeProvider) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedVolumeProvider{`,
+ `Purpose:` + fmt.Sprintf("%v", this.Purpose) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceAccountConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceAccountConfig{`,
+ `Issuer:` + valueToStringGenerated(this.Issuer) + `,`,
+ `SigningKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.SigningKeySecret), "LocalObjectReference", "v1.LocalObjectReference", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Shoot) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Shoot{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ShootSpec", "ShootSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ShootStatus", "ShootStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Shoot{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Shoot", "Shoot", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ShootList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootMachineImage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ShootMachineImage{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Version:` + valueToStringGenerated(this.Version) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootNetworks) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ShootNetworks{`,
+ `Pods:` + valueToStringGenerated(this.Pods) + `,`,
+ `Services:` + valueToStringGenerated(this.Services) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForExtensions := "[]Extension{"
+ for _, f := range this.Extensions {
+ repeatedStringForExtensions += strings.Replace(strings.Replace(f.String(), "Extension", "Extension", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForExtensions += "}"
+ repeatedStringForResources := "[]NamedResourceReference{"
+ for _, f := range this.Resources {
+ repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "NamedResourceReference", "NamedResourceReference", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResources += "}"
+ repeatedStringForTolerations := "[]Toleration{"
+ for _, f := range this.Tolerations {
+ repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTolerations += "}"
+ s := strings.Join([]string{`&ShootSpec{`,
+ `Addons:` + strings.Replace(this.Addons.String(), "Addons", "Addons", 1) + `,`,
+ `CloudProfileName:` + fmt.Sprintf("%v", this.CloudProfileName) + `,`,
+ `DNS:` + strings.Replace(this.DNS.String(), "DNS", "DNS", 1) + `,`,
+ `Extensions:` + repeatedStringForExtensions + `,`,
+ `Hibernation:` + strings.Replace(this.Hibernation.String(), "Hibernation", "Hibernation", 1) + `,`,
+ `Kubernetes:` + strings.Replace(strings.Replace(this.Kubernetes.String(), "Kubernetes", "Kubernetes", 1), `&`, ``, 1) + `,`,
+ `Networking:` + strings.Replace(strings.Replace(this.Networking.String(), "Networking", "Networking", 1), `&`, ``, 1) + `,`,
+ `Maintenance:` + strings.Replace(this.Maintenance.String(), "Maintenance", "Maintenance", 1) + `,`,
+ `Monitoring:` + strings.Replace(this.Monitoring.String(), "Monitoring", "Monitoring", 1) + `,`,
+ `Provider:` + strings.Replace(strings.Replace(this.Provider.String(), "Provider", "Provider", 1), `&`, ``, 1) + `,`,
+ `Purpose:` + valueToStringGenerated(this.Purpose) + `,`,
+ `Region:` + fmt.Sprintf("%v", this.Region) + `,`,
+ `SecretBindingName:` + fmt.Sprintf("%v", this.SecretBindingName) + `,`,
+ `SeedName:` + valueToStringGenerated(this.SeedName) + `,`,
+ `SeedSelector:` + strings.Replace(this.SeedSelector.String(), "SeedSelector", "SeedSelector", 1) + `,`,
+ `Resources:` + repeatedStringForResources + `,`,
+ `Tolerations:` + repeatedStringForTolerations + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootState) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ShootState{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ShootStateSpec", "ShootStateSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootStateList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ShootState{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ShootState", "ShootState", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ShootStateList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootStateSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForGardener := "[]GardenerResourceData{"
+ for _, f := range this.Gardener {
+ repeatedStringForGardener += strings.Replace(strings.Replace(f.String(), "GardenerResourceData", "GardenerResourceData", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForGardener += "}"
+ repeatedStringForExtensions := "[]ExtensionResourceState{"
+ for _, f := range this.Extensions {
+ repeatedStringForExtensions += strings.Replace(strings.Replace(f.String(), "ExtensionResourceState", "ExtensionResourceState", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForExtensions += "}"
+ repeatedStringForResources := "[]ResourceData{"
+ for _, f := range this.Resources {
+ repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceData", "ResourceData", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResources += "}"
+ s := strings.Join([]string{`&ShootStateSpec{`,
+ `Gardener:` + repeatedStringForGardener + `,`,
+ `Extensions:` + repeatedStringForExtensions + `,`,
+ `Resources:` + repeatedStringForResources + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ repeatedStringForConstraints := "[]Condition{"
+ for _, f := range this.Constraints {
+ repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConstraints += "}"
+ repeatedStringForLastErrors := "[]LastError{"
+ for _, f := range this.LastErrors {
+ repeatedStringForLastErrors += strings.Replace(strings.Replace(f.String(), "LastError", "LastError", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForLastErrors += "}"
+ s := strings.Join([]string{`&ShootStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `Constraints:` + repeatedStringForConstraints + `,`,
+ `Gardener:` + strings.Replace(strings.Replace(this.Gardener.String(), "Gardener", "Gardener", 1), `&`, ``, 1) + `,`,
+ `IsHibernated:` + fmt.Sprintf("%v", this.IsHibernated) + `,`,
+ `LastOperation:` + strings.Replace(this.LastOperation.String(), "LastOperation", "LastOperation", 1) + `,`,
+ `LastError:` + strings.Replace(this.LastError.String(), "LastError", "LastError", 1) + `,`,
+ `LastErrors:` + repeatedStringForLastErrors + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `RetryCycleStartTime:` + strings.Replace(fmt.Sprintf("%v", this.RetryCycleStartTime), "Time", "v11.Time", 1) + `,`,
+ `Seed:` + valueToStringGenerated(this.Seed) + `,`,
+ `TechnicalID:` + fmt.Sprintf("%v", this.TechnicalID) + `,`,
+ `UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+ `ClusterIdentity:` + valueToStringGenerated(this.ClusterIdentity) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Toleration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Toleration{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Value:` + valueToStringGenerated(this.Value) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *VerticalPodAutoscaler) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&VerticalPodAutoscaler{`,
+ `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`,
+ `EvictAfterOOMThreshold:` + strings.Replace(fmt.Sprintf("%v", this.EvictAfterOOMThreshold), "Duration", "v11.Duration", 1) + `,`,
+ `EvictionRateBurst:` + valueToStringGenerated(this.EvictionRateBurst) + `,`,
+ `EvictionRateLimit:` + valueToStringGenerated(this.EvictionRateLimit) + `,`,
+ `EvictionTolerance:` + valueToStringGenerated(this.EvictionTolerance) + `,`,
+ `RecommendationMarginFraction:` + valueToStringGenerated(this.RecommendationMarginFraction) + `,`,
+ `UpdaterInterval:` + strings.Replace(fmt.Sprintf("%v", this.UpdaterInterval), "Duration", "v11.Duration", 1) + `,`,
+ `RecommenderInterval:` + strings.Replace(fmt.Sprintf("%v", this.RecommenderInterval), "Duration", "v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Volume) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Volume{`,
+ `Name:` + valueToStringGenerated(this.Name) + `,`,
+ `Type:` + valueToStringGenerated(this.Type) + `,`,
+ `VolumeSize:` + fmt.Sprintf("%v", this.VolumeSize) + `,`,
+ `Encrypted:` + valueToStringGenerated(this.Encrypted) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *VolumeType) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&VolumeType{`,
+ `Class:` + fmt.Sprintf("%v", this.Class) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Usable:` + valueToStringGenerated(this.Usable) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WatchCacheSizes) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForResources := "[]ResourceWatchCacheSize{"
+ for _, f := range this.Resources {
+ repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceWatchCacheSize", "ResourceWatchCacheSize", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResources += "}"
+ s := strings.Join([]string{`&WatchCacheSizes{`,
+ `Default:` + valueToStringGenerated(this.Default) + `,`,
+ `Resources:` + repeatedStringForResources + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Worker) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForTaints := "[]Taint{"
+ for _, f := range this.Taints {
+ repeatedStringForTaints += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForTaints += "}"
+ repeatedStringForDataVolumes := "[]DataVolume{"
+ for _, f := range this.DataVolumes {
+ repeatedStringForDataVolumes += strings.Replace(strings.Replace(f.String(), "DataVolume", "DataVolume", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForDataVolumes += "}"
+ keysForAnnotations := make([]string, 0, len(this.Annotations))
+ for k := range this.Annotations {
+ keysForAnnotations = append(keysForAnnotations, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ mapStringForAnnotations := "map[string]string{"
+ for _, k := range keysForAnnotations {
+ mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+ }
+ mapStringForAnnotations += "}"
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&Worker{`,
+ `Annotations:` + mapStringForAnnotations + `,`,
+ `CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
+ `CRI:` + strings.Replace(this.CRI.String(), "CRI", "CRI", 1) + `,`,
+ `Kubernetes:` + strings.Replace(this.Kubernetes.String(), "WorkerKubernetes", "WorkerKubernetes", 1) + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Machine:` + strings.Replace(strings.Replace(this.Machine.String(), "Machine", "Machine", 1), `&`, ``, 1) + `,`,
+ `Maximum:` + fmt.Sprintf("%v", this.Maximum) + `,`,
+ `Minimum:` + fmt.Sprintf("%v", this.Minimum) + `,`,
+ `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Taints:` + repeatedStringForTaints + `,`,
+ `Volume:` + strings.Replace(this.Volume.String(), "Volume", "Volume", 1) + `,`,
+ `DataVolumes:` + repeatedStringForDataVolumes + `,`,
+ `KubeletDataVolumeName:` + valueToStringGenerated(this.KubeletDataVolumeName) + `,`,
+ `Zones:` + fmt.Sprintf("%v", this.Zones) + `,`,
+ `SystemComponents:` + strings.Replace(this.SystemComponents.String(), "WorkerSystemComponents", "WorkerSystemComponents", 1) + `,`,
+ `MachineControllerManagerSettings:` + strings.Replace(this.MachineControllerManagerSettings.String(), "MachineControllerManagerSettings", "MachineControllerManagerSettings", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WorkerKubernetes) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&WorkerKubernetes{`,
+ `Kubelet:` + strings.Replace(this.Kubelet.String(), "KubeletConfig", "KubeletConfig", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WorkerSystemComponents) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&WorkerSystemComponents{`,
+ `Allow:` + fmt.Sprintf("%v", this.Allow) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *Addon) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Addon: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Addon: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Addons) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Addons: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Addons: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesDashboard", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.KubernetesDashboard == nil {
+ m.KubernetesDashboard = &KubernetesDashboard{}
+ }
+ if err := m.KubernetesDashboard.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NginxIngress", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NginxIngress == nil {
+ m.NginxIngress = &NginxIngress{}
+ }
+ if err := m.NginxIngress.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AdmissionPlugin) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AdmissionPlugin: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AdmissionPlugin: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Config == nil {
+ m.Config = &runtime.RawExtension{}
+ }
+ if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Alerting) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Alerting: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Alerting: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EmailReceivers", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EmailReceivers = append(m.EmailReceivers, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuditConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuditConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuditConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuditPolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuditPolicy == nil {
+ m.AuditPolicy = &AuditPolicy{}
+ }
+ if err := m.AuditPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuditPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuditPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuditPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConfigMapRef == nil {
+ m.ConfigMapRef = &v1.ObjectReference{}
+ }
+ if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AvailabilityZone) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AvailabilityZone: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AvailabilityZone: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnavailableMachineTypes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UnavailableMachineTypes = append(m.UnavailableMachineTypes, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnavailableVolumeTypes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UnavailableVolumeTypes = append(m.UnavailableVolumeTypes, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupBucket) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupBucket: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupBucket: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupBucketList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupBucketList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupBucketList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, BackupBucket{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupBucketProvider) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupBucketProvider: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupBucketProvider: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Region = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupBucketSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupBucketSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupBucketSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Seed", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Seed = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupBucketStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupBucketStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupBucketStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderStatus", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderStatus == nil {
+ m.ProviderStatus = &runtime.RawExtension{}
+ }
+ if err := m.ProviderStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastOperation", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastOperation == nil {
+ m.LastOperation = &LastOperation{}
+ }
+ if err := m.LastOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastError", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastError == nil {
+ m.LastError = &LastError{}
+ }
+ if err := m.LastError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GeneratedSecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.GeneratedSecretRef == nil {
+ m.GeneratedSecretRef = &v1.SecretReference{}
+ }
+ if err := m.GeneratedSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupEntry) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupEntry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupEntryList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupEntryList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupEntryList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, BackupEntry{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupEntrySpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupEntrySpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupEntrySpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BucketName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BucketName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Seed", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Seed = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupEntryStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupEntryStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupEntryStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastOperation", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastOperation == nil {
+ m.LastOperation = &LastOperation{}
+ }
+ if err := m.LastOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastError", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastError == nil {
+ m.LastError = &LastError{}
+ }
+ if err := m.LastError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CRI) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CRI: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CRI: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = CRIName(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContainerRuntimes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContainerRuntimes = append(m.ContainerRuntimes, ContainerRuntime{})
+ if err := m.ContainerRuntimes[len(m.ContainerRuntimes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CloudInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CloudInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CloudInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Region = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CloudProfile) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CloudProfile: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CloudProfile: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CloudProfileList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CloudProfileList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CloudProfileList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, CloudProfile{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CloudProfileSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CloudProfileSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CloudProfileSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.CABundle = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineImages", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MachineImages = append(m.MachineImages, MachineImage{})
+ if err := m.MachineImages[len(m.MachineImages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineTypes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MachineTypes = append(m.MachineTypes, MachineType{})
+ if err := m.MachineTypes[len(m.MachineTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Regions = append(m.Regions, Region{})
+ if err := m.Regions[len(m.Regions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SeedSelector == nil {
+ m.SeedSelector = &SeedSelector{}
+ }
+ if err := m.SeedSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeTypes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeTypes = append(m.VolumeTypes, VolumeType{})
+ if err := m.VolumeTypes[len(m.VolumeTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterAutoscaler) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterAutoscaler: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelayAfterAdd", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ScaleDownDelayAfterAdd == nil {
+ m.ScaleDownDelayAfterAdd = &v11.Duration{}
+ }
+ if err := m.ScaleDownDelayAfterAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelayAfterDelete", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ScaleDownDelayAfterDelete == nil {
+ m.ScaleDownDelayAfterDelete = &v11.Duration{}
+ }
+ if err := m.ScaleDownDelayAfterDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelayAfterFailure", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ScaleDownDelayAfterFailure == nil {
+ m.ScaleDownDelayAfterFailure = &v11.Duration{}
+ }
+ if err := m.ScaleDownDelayAfterFailure.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownUnneededTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ScaleDownUnneededTime == nil {
+ m.ScaleDownUnneededTime = &v11.Duration{}
+ }
+ if err := m.ScaleDownUnneededTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownUtilizationThreshold", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.ScaleDownUtilizationThreshold = &v2
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScanInterval", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ScanInterval == nil {
+ m.ScanInterval = &v11.Duration{}
+ }
+ if err := m.ScanInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cloud", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Cloud.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Condition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Condition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Condition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = ConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Codes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Codes = append(m.Codes, ErrorCode(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ContainerRuntime) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ContainerRuntime: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ContainerRuntime: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerDeployment) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerDeployment: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerDeployment: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ControllerDeploymentPolicy(dAtA[iNdEx:postIndex])
+ m.Policy = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SeedSelector == nil {
+ m.SeedSelector = &v11.LabelSelector{}
+ }
+ if err := m.SeedSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerInstallation) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerInstallation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerInstallation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerInstallationList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerInstallationList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerInstallationList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ControllerInstallation{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerInstallationSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerInstallationSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerInstallationSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RegistrationRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.RegistrationRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SeedRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerInstallationStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerInstallationStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerInstallationStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderStatus", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderStatus == nil {
+ m.ProviderStatus = &runtime.RawExtension{}
+ }
+ if err := m.ProviderStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerRegistration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerRegistration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerRegistration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerRegistrationList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerRegistrationList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerRegistrationList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ControllerRegistration{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerRegistrationSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerRegistrationSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerRegistrationSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resources = append(m.Resources, ControllerResource{})
+ if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Deployment == nil {
+ m.Deployment = &ControllerDeployment{}
+ }
+ if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerResource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerResource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerResource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GloballyEnabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.GloballyEnabled = &b
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReconcileTimeout", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ReconcileTimeout == nil {
+ m.ReconcileTimeout = &v11.Duration{}
+ }
+ if err := m.ReconcileTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Primary = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DNS) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DNS: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DNS: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Domain = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Providers = append(m.Providers, DNSProvider{})
+ if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DNSIncludeExclude) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DNSIncludeExclude: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DNSIncludeExclude: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Include", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Include = append(m.Include, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exclude", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Exclude = append(m.Exclude, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DNSProvider) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DNSProvider: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DNSProvider: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Domains", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Domains == nil {
+ m.Domains = &DNSIncludeExclude{}
+ }
+ if err := m.Domains.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Primary = &b
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.SecretName = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Type = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Zones == nil {
+ m.Zones = &DNSIncludeExclude{}
+ }
+ if err := m.Zones.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DataVolume) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DataVolume: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DataVolume: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Type = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeSize", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeSize = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Encrypted", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Encrypted = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Endpoint) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Endpoint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.URL = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Purpose = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExpirableVersion) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExpirableVersion: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExpirableVersion: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpirationDate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ExpirationDate == nil {
+ m.ExpirationDate = &v11.Time{}
+ }
+ if err := m.ExpirationDate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Classification", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := VersionClassification(dAtA[iNdEx:postIndex])
+ m.Classification = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Extension) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Extension: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Extension: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Disabled = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExtensionResourceState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExtensionResourceState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExtensionResourceState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Name = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Purpose = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.State == nil {
+ m.State = &runtime.RawExtension{}
+ }
+ if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resources = append(m.Resources, v1beta1.NamedResourceReference{})
+ if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Gardener) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Gardener: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Gardener: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GardenerResourceData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GardenerResourceData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GardenerResourceData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Hibernation) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Hibernation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Hibernation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Enabled = &b
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Schedules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Schedules = append(m.Schedules, HibernationSchedule{})
+ if err := m.Schedules[len(m.Schedules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HibernationSchedule) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HibernationSchedule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HibernationSchedule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Start = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.End = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Location = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HorizontalPodAutoscalerConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CPUInitializationPeriod", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CPUInitializationPeriod == nil {
+ m.CPUInitializationPeriod = &v11.Duration{}
+ }
+ if err := m.CPUInitializationPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DownscaleDelay", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DownscaleDelay == nil {
+ m.DownscaleDelay = &v11.Duration{}
+ }
+ if err := m.DownscaleDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DownscaleStabilization", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DownscaleStabilization == nil {
+ m.DownscaleStabilization = &v11.Duration{}
+ }
+ if err := m.DownscaleStabilization.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InitialReadinessDelay", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.InitialReadinessDelay == nil {
+ m.InitialReadinessDelay = &v11.Duration{}
+ }
+ if err := m.InitialReadinessDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SyncPeriod", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SyncPeriod == nil {
+ m.SyncPeriod = &v11.Duration{}
+ }
+ if err := m.SyncPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tolerance", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.Tolerance = &v2
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpscaleDelay", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.UpscaleDelay == nil {
+ m.UpscaleDelay = &v11.Duration{}
+ }
+ if err := m.UpscaleDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Ingress) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Ingress: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Domain = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Controller.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressController) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressController: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressController: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeAPIServerConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeAPIServerConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AdmissionPlugins", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AdmissionPlugins = append(m.AdmissionPlugins, AdmissionPlugin{})
+ if err := m.AdmissionPlugins[len(m.AdmissionPlugins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIAudiences", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIAudiences = append(m.APIAudiences, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuditConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuditConfig == nil {
+ m.AuditConfig = &AuditConfig{}
+ }
+ if err := m.AuditConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EnableBasicAuthentication", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.EnableBasicAuthentication = &b
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OIDCConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.OIDCConfig == nil {
+ m.OIDCConfig = &OIDCConfig{}
+ }
+ if err := m.OIDCConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RuntimeConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RuntimeConfig == nil {
+ m.RuntimeConfig = make(map[string]bool)
+ }
+ var mapkey string
+ var mapvalue bool
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapvaluetemp int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapvaluetemp |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ mapvalue = bool(mapvaluetemp != 0)
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.RuntimeConfig[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ServiceAccountConfig == nil {
+ m.ServiceAccountConfig = &ServiceAccountConfig{}
+ }
+ if err := m.ServiceAccountConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WatchCacheSizes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.WatchCacheSizes == nil {
+ m.WatchCacheSizes = &WatchCacheSizes{}
+ }
+ if err := m.WatchCacheSizes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Requests == nil {
+ m.Requests = &KubeAPIServerRequests{}
+ }
+ if err := m.Requests.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeAPIServerRequests) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeAPIServerRequests: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeAPIServerRequests: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxNonMutatingInflight", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MaxNonMutatingInflight = &v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxMutatingInflight", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MaxMutatingInflight = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeControllerManagerConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeControllerManagerConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeControllerManagerConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HorizontalPodAutoscalerConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.HorizontalPodAutoscalerConfig == nil {
+ m.HorizontalPodAutoscalerConfig = &HorizontalPodAutoscalerConfig{}
+ }
+ if err := m.HorizontalPodAutoscalerConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeCIDRMaskSize", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.NodeCIDRMaskSize = &v
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodEvictionTimeout", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PodEvictionTimeout == nil {
+ m.PodEvictionTimeout = &v11.Duration{}
+ }
+ if err := m.PodEvictionTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeProxyConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeProxyConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ProxyMode(dAtA[iNdEx:postIndex])
+ m.Mode = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeSchedulerConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeSchedulerConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeSchedulerConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeMaxPDVols", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.KubeMaxPDVols = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeletConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeletConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeletConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CPUCFSQuota", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.CPUCFSQuota = &b
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CPUManagerPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.CPUManagerPolicy = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionHard", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EvictionHard == nil {
+ m.EvictionHard = &KubeletConfigEviction{}
+ }
+ if err := m.EvictionHard.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionMaxPodGracePeriod", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.EvictionMaxPodGracePeriod = &v
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionMinimumReclaim", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EvictionMinimumReclaim == nil {
+ m.EvictionMinimumReclaim = &KubeletConfigEvictionMinimumReclaim{}
+ }
+ if err := m.EvictionMinimumReclaim.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionPressureTransitionPeriod", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EvictionPressureTransitionPeriod == nil {
+ m.EvictionPressureTransitionPeriod = &v11.Duration{}
+ }
+ if err := m.EvictionPressureTransitionPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionSoft", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EvictionSoft == nil {
+ m.EvictionSoft = &KubeletConfigEviction{}
+ }
+ if err := m.EvictionSoft.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionSoftGracePeriod", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EvictionSoftGracePeriod == nil {
+ m.EvictionSoftGracePeriod = &KubeletConfigEvictionSoftGracePeriod{}
+ }
+ if err := m.EvictionSoftGracePeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxPods", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MaxPods = &v
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodPIDsLimit", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PodPIDsLimit = &v
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImagePullProgressDeadline", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ImagePullProgressDeadline == nil {
+ m.ImagePullProgressDeadline = &v11.Duration{}
+ }
+ if err := m.ImagePullProgressDeadline.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 13:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FailSwapOn", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.FailSwapOn = &b
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeReserved", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.KubeReserved == nil {
+ m.KubeReserved = &KubeletConfigReserved{}
+ }
+ if err := m.KubeReserved.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SystemReserved", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SystemReserved == nil {
+ m.SystemReserved = &KubeletConfigReserved{}
+ }
+ if err := m.SystemReserved.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeletConfigEviction) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeletConfigEviction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeletConfigEviction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.MemoryAvailable = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.ImageFSAvailable = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.ImageFSInodesFree = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.NodeFSAvailable = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.NodeFSInodesFree = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeletConfigEvictionMinimumReclaim) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeletConfigEvictionMinimumReclaim: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeletConfigEvictionMinimumReclaim: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MemoryAvailable == nil {
+ m.MemoryAvailable = &resource.Quantity{}
+ }
+ if err := m.MemoryAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ImageFSAvailable == nil {
+ m.ImageFSAvailable = &resource.Quantity{}
+ }
+ if err := m.ImageFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ImageFSInodesFree == nil {
+ m.ImageFSInodesFree = &resource.Quantity{}
+ }
+ if err := m.ImageFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeFSAvailable == nil {
+ m.NodeFSAvailable = &resource.Quantity{}
+ }
+ if err := m.NodeFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeFSInodesFree == nil {
+ m.NodeFSInodesFree = &resource.Quantity{}
+ }
+ if err := m.NodeFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeletConfigEvictionSoftGracePeriod) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeletConfigEvictionSoftGracePeriod: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeletConfigEvictionSoftGracePeriod: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MemoryAvailable == nil {
+ m.MemoryAvailable = &v11.Duration{}
+ }
+ if err := m.MemoryAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ImageFSAvailable == nil {
+ m.ImageFSAvailable = &v11.Duration{}
+ }
+ if err := m.ImageFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ImageFSInodesFree == nil {
+ m.ImageFSInodesFree = &v11.Duration{}
+ }
+ if err := m.ImageFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeFSAvailable == nil {
+ m.NodeFSAvailable = &v11.Duration{}
+ }
+ if err := m.NodeFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeFSInodesFree == nil {
+ m.NodeFSInodesFree = &v11.Duration{}
+ }
+ if err := m.NodeFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeletConfigReserved) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeletConfigReserved: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeletConfigReserved: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CPU == nil {
+ m.CPU = &resource.Quantity{}
+ }
+ if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Memory == nil {
+ m.Memory = &resource.Quantity{}
+ }
+ if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EphemeralStorage", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EphemeralStorage == nil {
+ m.EphemeralStorage = &resource.Quantity{}
+ }
+ if err := m.EphemeralStorage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PID", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PID == nil {
+ m.PID = &resource.Quantity{}
+ }
+ if err := m.PID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Kubernetes) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Kubernetes: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Kubernetes: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegedContainers", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.AllowPrivilegedContainers = &b
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterAutoscaler", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClusterAutoscaler == nil {
+ m.ClusterAutoscaler = &ClusterAutoscaler{}
+ }
+ if err := m.ClusterAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeAPIServer", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.KubeAPIServer == nil {
+ m.KubeAPIServer = &KubeAPIServerConfig{}
+ }
+ if err := m.KubeAPIServer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeControllerManager", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.KubeControllerManager == nil {
+ m.KubeControllerManager = &KubeControllerManagerConfig{}
+ }
+ if err := m.KubeControllerManager.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeScheduler", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.KubeScheduler == nil {
+ m.KubeScheduler = &KubeSchedulerConfig{}
+ }
+ if err := m.KubeScheduler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeProxy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.KubeProxy == nil {
+ m.KubeProxy = &KubeProxyConfig{}
+ }
+ if err := m.KubeProxy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kubelet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kubelet == nil {
+ m.Kubelet = &KubeletConfig{}
+ }
+ if err := m.Kubelet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VerticalPodAutoscaler", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.VerticalPodAutoscaler == nil {
+ m.VerticalPodAutoscaler = &VerticalPodAutoscaler{}
+ }
+ if err := m.VerticalPodAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubernetesConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubernetesConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubernetesConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FeatureGates", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FeatureGates == nil {
+ m.FeatureGates = make(map[string]bool)
+ }
+ var mapkey string
+ var mapvalue bool
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapvaluetemp int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapvaluetemp |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ mapvalue = bool(mapvaluetemp != 0)
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.FeatureGates[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubernetesDashboard) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubernetesDashboard: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubernetesDashboard: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthenticationMode", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.AuthenticationMode = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addon", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Addon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubernetesInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubernetesInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubernetesInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubernetesSettings) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubernetesSettings: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubernetesSettings: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Versions = append(m.Versions, ExpirableVersion{})
+ if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LastError) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LastError: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LastError: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.TaskID = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Codes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Codes = append(m.Codes, ErrorCode(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastUpdateTime == nil {
+ m.LastUpdateTime = &v11.Time{}
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LastOperation) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LastOperation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LastOperation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType)
+ }
+ m.Progress = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Progress |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.State = LastOperationState(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = LastOperationType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Machine) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Machine: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Machine: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Image == nil {
+ m.Image = &ShootMachineImage{}
+ }
+ if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MachineControllerManagerSettings) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MachineControllerManagerSettings: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MachineControllerManagerSettings: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineDrainTimeout", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MachineDrainTimeout == nil {
+ m.MachineDrainTimeout = &v11.Duration{}
+ }
+ if err := m.MachineDrainTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineHealthTimeout", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MachineHealthTimeout == nil {
+ m.MachineHealthTimeout = &v11.Duration{}
+ }
+ if err := m.MachineHealthTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineCreationTimeout", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MachineCreationTimeout == nil {
+ m.MachineCreationTimeout = &v11.Duration{}
+ }
+ if err := m.MachineCreationTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxEvictRetries", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MaxEvictRetries = &v
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeConditions", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeConditions = append(m.NodeConditions, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MachineImage) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MachineImage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MachineImage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Versions = append(m.Versions, MachineImageVersion{})
+ if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MachineImageVersion) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MachineImageVersion: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MachineImageVersion: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpirableVersion", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ExpirableVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CRI", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CRI = append(m.CRI, CRI{})
+ if err := m.CRI[len(m.CRI)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MachineType) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MachineType: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MachineType: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GPU", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.GPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Storage == nil {
+ m.Storage = &MachineTypeStorage{}
+ }
+ if err := m.Storage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Usable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Usable = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MachineTypeStorage) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MachineTypeStorage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MachineTypeStorage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Class", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Class = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StorageSize", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.StorageSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Maintenance) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Maintenance: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Maintenance: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AutoUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AutoUpdate == nil {
+ m.AutoUpdate = &MaintenanceAutoUpdate{}
+ }
+ if err := m.AutoUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeWindow", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TimeWindow == nil {
+ m.TimeWindow = &MaintenanceTimeWindow{}
+ }
+ if err := m.TimeWindow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfineSpecUpdateRollout", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.ConfineSpecUpdateRollout = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MaintenanceAutoUpdate) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MaintenanceAutoUpdate: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MaintenanceAutoUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesVersion", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.KubernetesVersion = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineImageVersion", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MachineImageVersion = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MaintenanceTimeWindow) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MaintenanceTimeWindow: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MaintenanceTimeWindow: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Begin", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Begin = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.End = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Monitoring) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Monitoring: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Monitoring: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alerting", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Alerting == nil {
+ m.Alerting = &Alerting{}
+ }
+ if err := m.Alerting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NamedResourceReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamedResourceReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamedResourceReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ResourceRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Networking) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Networking: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Networking: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Pods = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Nodes = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Services = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
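+// Unmarshal decodes an NginxIngress from its protobuf wire form. The Config
+// map is encoded as repeated map-entry messages (key in field 1, value in
+// field 2), so each entry is parsed in its own inner loop before being stored
+// into m.Config, which is lazily allocated on the first entry.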
+func (m *NginxIngress) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NginxIngress: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NginxIngress: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Config == nil {
+ m.Config = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Config[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := k8s_io_api_core_v1.ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex])
+ m.ExternalTrafficPolicy = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addon", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Addon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
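+// Unmarshal decodes an OIDCConfig from its protobuf wire form. Optional string
+// fields such as CABundle and ClientID are decoded into freshly allocated
+// strings and stored as pointers, so an unset field remains nil rather than
+// becoming the empty string.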
+func (m *OIDCConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OIDCConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OIDCConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.CABundle = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientAuthentication", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClientAuthentication == nil {
+ m.ClientAuthentication = &OpenIDConnectClientAuthentication{}
+ }
+ if err := m.ClientAuthentication.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.ClientID = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupsClaim", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.GroupsClaim = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupsPrefix", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.GroupsPrefix = &s
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IssuerURL", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.IssuerURL = &s
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequiredClaims", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RequiredClaims == nil {
+ m.RequiredClaims = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.RequiredClaims[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SigningAlgs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SigningAlgs = append(m.SigningAlgs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UsernameClaim", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.UsernameClaim = &s
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UsernamePrefix", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.UsernamePrefix = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OpenIDConnectClientAuthentication: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OpenIDConnectClientAuthentication: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExtraConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ExtraConfig == nil {
+ m.ExtraConfig = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.ExtraConfig[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Secret = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Plant) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Plant: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Plant: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
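+// Unmarshal decodes a PlantList from its protobuf wire form. Items is a
+// repeated message field: each occurrence appends a zero-value Plant and then
+// unmarshals the length-delimited payload into that last element.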
+func (m *PlantList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PlantList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PlantList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Plant{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PlantSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PlantSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PlantSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Endpoints = append(m.Endpoints, Endpoint{})
+ if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PlantStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PlantStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PlantStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ObservedGeneration = &v
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterInfo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClusterInfo == nil {
+ m.ClusterInfo = &ClusterInfo{}
+ }
+ if err := m.ClusterInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Project) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Project: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Project{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectMember) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectMember: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectMember: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Subject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreatedBy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CreatedBy == nil {
+ m.CreatedBy = &v13.Subject{}
+ }
+ if err := m.CreatedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Description = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Owner == nil {
+ m.Owner = &v13.Subject{}
+ }
+ if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Purpose = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, ProjectMember{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Namespace = &s
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Tolerations == nil {
+ m.Tolerations = &ProjectTolerations{}
+ }
+ if err := m.Tolerations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
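+// Unmarshal decodes a ProjectStatus from its protobuf wire form.
+// ObservedGeneration is a varint (wire type 0) accumulated directly into the
+// int64 field, while the optional stale-timestamp sub-messages are allocated
+// on first use before being decoded.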
+func (m *ProjectStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Phase = ProjectPhase(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StaleSinceTimestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.StaleSinceTimestamp == nil {
+ m.StaleSinceTimestamp = &v11.Time{}
+ }
+ if err := m.StaleSinceTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StaleAutoDeleteTimestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.StaleAutoDeleteTimestamp == nil {
+ m.StaleAutoDeleteTimestamp = &v11.Time{}
+ }
+ if err := m.StaleAutoDeleteTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectTolerations) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectTolerations: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectTolerations: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Defaults", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Defaults = append(m.Defaults, Toleration{})
+ if err := m.Defaults[len(m.Defaults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Whitelist", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Whitelist = append(m.Whitelist, Toleration{})
+ if err := m.Whitelist[len(m.Whitelist)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Provider) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Provider: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ControlPlaneConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ControlPlaneConfig == nil {
+ m.ControlPlaneConfig = &runtime.RawExtension{}
+ }
+ if err := m.ControlPlaneConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InfrastructureConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.InfrastructureConfig == nil {
+ m.InfrastructureConfig = &runtime.RawExtension{}
+ }
+ if err := m.InfrastructureConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Workers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Workers = append(m.Workers, Worker{})
+ if err := m.Workers[len(m.Workers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Quota) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Quota: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Quota: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QuotaList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QuotaList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QuotaList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Quota{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QuotaSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QuotaSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterLifetimeDays", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ClusterLifetimeDays = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Metrics == nil {
+ m.Metrics = make(k8s_io_api_core_v1.ResourceList)
+ }
+ var mapkey k8s_io_api_core_v1.ResourceName
+ mapvalue := &resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Metrics[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Region) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Region: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Region: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Zones = append(m.Zones, AvailabilityZone{})
+ if err := m.Zones[len(m.Zones)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CrossVersionObjectReference", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.CrossVersionObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceWatchCacheSize) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceWatchCacheSize: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceWatchCacheSize: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.APIGroup = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resource = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CacheSize", wireType)
+ }
+ m.CacheSize = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CacheSize |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecretBinding) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecretBinding: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecretBinding: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Quotas", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Quotas = append(m.Quotas, v1.ObjectReference{})
+ if err := m.Quotas[len(m.Quotas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecretBindingList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecretBindingList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecretBindingList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, SecretBinding{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Seed) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Seed: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Seed: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedBackup) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedBackup: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedBackup: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Provider = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Region = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedDNS) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedDNS: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedDNS: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IngressDomain", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.IngressDomain = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Provider == nil {
+ m.Provider = &SeedDNSProvider{}
+ }
+ if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedDNSProvider) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedDNSProvider: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedDNSProvider: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Domains", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Domains == nil {
+ m.Domains = &DNSIncludeExclude{}
+ }
+ if err := m.Domains.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Zones == nil {
+ m.Zones = &DNSIncludeExclude{}
+ }
+ if err := m.Zones.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Seed{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedNetworks) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedNetworks: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedNetworks: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Nodes = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Pods = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Services = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ShootDefaults", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ShootDefaults == nil {
+ m.ShootDefaults = &ShootNetworks{}
+ }
+ if err := m.ShootDefaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedProvider) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedProvider: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedProvider: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Region = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSelector) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LabelSelector == nil {
+ m.LabelSelector = &v11.LabelSelector{}
+ }
+ if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderTypes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProviderTypes = append(m.ProviderTypes, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSettingExcessCapacityReservation) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSettingExcessCapacityReservation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSettingExcessCapacityReservation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSettingLoadBalancerServices) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSettingLoadBalancerServices: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSettingLoadBalancerServices: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Annotations == nil {
+ m.Annotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Annotations[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSettingScheduling) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSettingScheduling: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSettingScheduling: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Visible", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Visible = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSettingShootDNS) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSettingShootDNS: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSettingShootDNS: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSettingVerticalPodAutoscaler) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSettingVerticalPodAutoscaler: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSettingVerticalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSettings) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSettings: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSettings: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExcessCapacityReservation", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ExcessCapacityReservation == nil {
+ m.ExcessCapacityReservation = &SeedSettingExcessCapacityReservation{}
+ }
+ if err := m.ExcessCapacityReservation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Scheduling == nil {
+ m.Scheduling = &SeedSettingScheduling{}
+ }
+ if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ShootDNS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ShootDNS == nil {
+ m.ShootDNS = &SeedSettingShootDNS{}
+ }
+ if err := m.ShootDNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerServices", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LoadBalancerServices == nil {
+ m.LoadBalancerServices = &SeedSettingLoadBalancerServices{}
+ }
+ if err := m.LoadBalancerServices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VerticalPodAutoscaler", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.VerticalPodAutoscaler == nil {
+ m.VerticalPodAutoscaler = &SeedSettingVerticalPodAutoscaler{}
+ }
+ if err := m.VerticalPodAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Backup", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Backup == nil {
+ m.Backup = &SeedBackup{}
+ }
+ if err := m.Backup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BlockCIDRs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BlockCIDRs = append(m.BlockCIDRs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DNS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.DNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Networks.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SecretRef == nil {
+ m.SecretRef = &v1.SecretReference{}
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Taints = append(m.Taints, SeedTaint{})
+ if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Volume == nil {
+ m.Volume = &SeedVolume{}
+ }
+ if err := m.Volume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Settings", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Settings == nil {
+ m.Settings = &SeedSettings{}
+ }
+ if err := m.Settings.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Ingress == nil {
+ m.Ingress = &Ingress{}
+ }
+ if err := m.Ingress.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Gardener", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Gardener == nil {
+ m.Gardener = &Gardener{}
+ }
+ if err := m.Gardener.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.KubernetesVersion = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterIdentity", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.ClusterIdentity = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedTaint) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedTaint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedTaint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Value = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedVolume) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedVolume: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedVolume: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinimumSize", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MinimumSize == nil {
+ m.MinimumSize = &resource.Quantity{}
+ }
+ if err := m.MinimumSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Providers = append(m.Providers, SeedVolumeProvider{})
+ if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedVolumeProvider) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedVolumeProvider: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedVolumeProvider: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Purpose = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceAccountConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceAccountConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceAccountConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Issuer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Issuer = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SigningKeySecret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SigningKeySecret == nil {
+ m.SigningKeySecret = &v1.LocalObjectReference{}
+ }
+ if err := m.SigningKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Shoot) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Shoot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Shoot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Shoot{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootMachineImage) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootMachineImage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootMachineImage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Version = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootNetworks) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootNetworks: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootNetworks: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Pods = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Services = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addons", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Addons == nil {
+ m.Addons = &Addons{}
+ }
+ if err := m.Addons.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CloudProfileName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CloudProfileName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DNS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DNS == nil {
+ m.DNS = &DNS{}
+ }
+ if err := m.DNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Extensions = append(m.Extensions, Extension{})
+ if err := m.Extensions[len(m.Extensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hibernation", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Hibernation == nil {
+ m.Hibernation = &Hibernation{}
+ }
+ if err := m.Hibernation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Networking", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Networking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Maintenance", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Maintenance == nil {
+ m.Maintenance = &Maintenance{}
+ }
+ if err := m.Maintenance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Monitoring", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Monitoring == nil {
+ m.Monitoring = &Monitoring{}
+ }
+ if err := m.Monitoring.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ShootPurpose(dAtA[iNdEx:postIndex])
+ m.Purpose = &s
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Region = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretBindingName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SecretBindingName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.SeedName = &s
+ iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SeedSelector == nil {
+ m.SeedSelector = &SeedSelector{}
+ }
+ if err := m.SeedSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 16:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resources = append(m.Resources, NamedResourceReference{})
+ if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 17:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tolerations = append(m.Tolerations, Toleration{})
+ if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootStateList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootStateList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootStateList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ShootState{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootStateSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootStateSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootStateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Gardener", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Gardener = append(m.Gardener, GardenerResourceData{})
+ if err := m.Gardener[len(m.Gardener)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Extensions = append(m.Extensions, ExtensionResourceState{})
+ if err := m.Extensions[len(m.Extensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resources = append(m.Resources, ResourceData{})
+ if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Constraints = append(m.Constraints, Condition{})
+ if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Gardener", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Gardener.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IsHibernated", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IsHibernated = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastOperation", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastOperation == nil {
+ m.LastOperation = &LastOperation{}
+ }
+ if err := m.LastOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastError", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastError == nil {
+ m.LastError = &LastError{}
+ }
+ if err := m.LastError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastErrors", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LastErrors = append(m.LastErrors, LastError{})
+ if err := m.LastErrors[len(m.LastErrors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RetryCycleStartTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RetryCycleStartTime == nil {
+ m.RetryCycleStartTime = &v11.Time{}
+ }
+ if err := m.RetryCycleStartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Seed", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Seed = &s
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TechnicalID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TechnicalID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterIdentity", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.ClusterIdentity = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Toleration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Toleration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Value = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *VerticalPodAutoscaler) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: VerticalPodAutoscaler: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: VerticalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictAfterOOMThreshold", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EvictAfterOOMThreshold == nil {
+ m.EvictAfterOOMThreshold = &v11.Duration{}
+ }
+ if err := m.EvictAfterOOMThreshold.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionRateBurst", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.EvictionRateBurst = &v
+ case 4:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionRateLimit", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.EvictionRateLimit = &v2
+ case 5:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionTolerance", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.EvictionTolerance = &v2
+ case 6:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RecommendationMarginFraction", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.RecommendationMarginFraction = &v2
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdaterInterval", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.UpdaterInterval == nil {
+ m.UpdaterInterval = &v11.Duration{}
+ }
+ if err := m.UpdaterInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RecommenderInterval", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RecommenderInterval == nil {
+ m.RecommenderInterval = &v11.Duration{}
+ }
+ if err := m.RecommenderInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Volume) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Volume: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Name = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Type = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeSize", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeSize = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Encrypted", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Encrypted = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *VolumeType) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: VolumeType: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: VolumeType: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Class", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Class = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Usable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Usable = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchCacheSizes) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchCacheSizes: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchCacheSizes: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Default = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resources = append(m.Resources, ResourceWatchCacheSize{})
+ if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Worker) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Worker: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Worker: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Annotations == nil {
+ m.Annotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Annotations[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.CABundle = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CRI", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CRI == nil {
+ m.CRI = &CRI{}
+ }
+ if err := m.CRI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kubernetes == nil {
+ m.Kubernetes = &WorkerKubernetes{}
+ }
+ if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Machine", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Machine.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Maximum", wireType)
+ }
+ m.Maximum = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Maximum |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Minimum", wireType)
+ }
+ m.Minimum = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Minimum |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxSurge == nil {
+ m.MaxSurge = &intstr.IntOrString{}
+ }
+ if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxUnavailable == nil {
+ m.MaxUnavailable = &intstr.IntOrString{}
+ }
+ if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Taints = append(m.Taints, v1.Taint{})
+ if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Volume == nil {
+ m.Volume = &Volume{}
+ }
+ if err := m.Volume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataVolumes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DataVolumes = append(m.DataVolumes, DataVolume{})
+ if err := m.DataVolumes[len(m.DataVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 16:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeletDataVolumeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.KubeletDataVolumeName = &s
+ iNdEx = postIndex
+ case 17:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Zones = append(m.Zones, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 18:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SystemComponents", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SystemComponents == nil {
+ m.SystemComponents = &WorkerSystemComponents{}
+ }
+ if err := m.SystemComponents.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 19:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineControllerManagerSettings", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MachineControllerManagerSettings == nil {
+ m.MachineControllerManagerSettings = &MachineControllerManagerSettings{}
+ }
+ if err := m.MachineControllerManagerSettings.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WorkerKubernetes) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WorkerKubernetes: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WorkerKubernetes: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kubelet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kubelet == nil {
+ m.Kubelet = &KubeletConfig{}
+ }
+ if err := m.Kubelet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WorkerSystemComponents) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WorkerSystemComponents: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WorkerSystemComponents: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Allow", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Allow = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
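
The generated Unmarshal methods above repeat the same base-128 varint decoding loop for every field tag and every length prefix, inlined into each `case` rather than factored into a helper. For orientation only, here is a minimal, self-contained Go sketch of that decoding step; `decodeVarint` and its error values are illustrative names, not part of the generated code in this patch.

```go
package main

import (
	"errors"
	"fmt"
)

var errOverflow = errors.New("varint overflows a 64-bit integer")

// decodeVarint reads one protobuf base-128 varint from data and returns the
// decoded value and the number of bytes consumed.
func decodeVarint(data []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(data); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errOverflow
		}
		b := data[i]
		v |= uint64(b&0x7F) << shift // the low 7 bits of each byte carry payload
		if b < 0x80 {                // a clear high bit marks the final byte
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected end of varint")
}

func main() {
	// 0x96 0x01 is the canonical protobuf example and decodes to 150.
	v, n, err := decodeVarint([]byte{0x96, 0x01})
	fmt.Println(v, n, err) // prints: 150 2 <nil>
}
```
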
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto
new file mode 100644
index 0000000..b50c3c8
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto
@@ -0,0 +1,2344 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package github.com.gardener.gardener.pkg.apis.core.v1alpha1;
+
+import "github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto";
+import "k8s.io/api/autoscaling/v1/generated.proto";
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/api/rbac/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// Addon allows enabling or disabling a specific addon and is used to derive from.
+message Addon {
+ // Enabled indicates whether the addon is enabled or not.
+ optional bool enabled = 1;
+}
+
+// Addons is a collection of configuration for specific addons which are managed by the Gardener.
+message Addons {
+ // KubernetesDashboard holds configuration settings for the kubernetes dashboard addon.
+ // +optional
+ optional KubernetesDashboard kubernetesDashboard = 1;
+
+ // NginxIngress holds configuration settings for the nginx-ingress addon.
+ // +optional
+ optional NginxIngress nginxIngress = 2;
+}
+
+// AdmissionPlugin contains information about a specific admission plugin and its corresponding configuration.
+message AdmissionPlugin {
+ // Name is the name of the plugin.
+ optional string name = 1;
+
+ // Config is the configuration of the plugin.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension config = 2;
+}
+
+// Alerting contains information about how alerting will be done (i.e. who will receive alerts and how).
+message Alerting {
+ // MonitoringEmailReceivers is a list of recipients for alerts
+ // +optional
+ repeated string emailReceivers = 1;
+}
+
+// AuditConfig contains settings for audit of the api server
+message AuditConfig {
+ // AuditPolicy contains configuration settings for audit policy of the kube-apiserver.
+ // +optional
+ optional AuditPolicy auditPolicy = 1;
+}
+
+// AuditPolicy contains audit policy for kube-apiserver
+message AuditPolicy {
+ // ConfigMapRef is a reference to a ConfigMap object in the same namespace,
+ // which contains the audit policy for the kube-apiserver.
+ // +optional
+ optional k8s.io.api.core.v1.ObjectReference configMapRef = 1;
+}
+
+// AvailabilityZone is an availability zone.
+message AvailabilityZone {
+  // Name is an availability zone name.
+ optional string name = 1;
+
+  // UnavailableMachineTypes is a list of machine type names that are not available in this zone.
+ // +optional
+ repeated string unavailableMachineTypes = 2;
+
+  // UnavailableVolumeTypes is a list of volume type names that are not available in this zone.
+ // +optional
+ repeated string unavailableVolumeTypes = 3;
+}
+
+// BackupBucket holds details about backup bucket
+message BackupBucket {
+ // Standard object metadata.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the Backup Bucket.
+ optional BackupBucketSpec spec = 2;
+
+ // Most recently observed status of the Backup Bucket.
+ optional BackupBucketStatus status = 3;
+}
+
+// BackupBucketList is a list of BackupBucket objects.
+message BackupBucketList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of BackupBucket.
+ repeated BackupBucket items = 2;
+}
+
+// BackupBucketProvider holds the details of cloud provider of the object store.
+message BackupBucketProvider {
+ // Type is the type of provider.
+ optional string type = 1;
+
+ // Region is the region of the bucket.
+ optional string region = 2;
+}
+
+// BackupBucketSpec is the specification of a Backup Bucket.
+message BackupBucketSpec {
+  // Provider holds the details of the cloud provider of the object store.
+ optional BackupBucketProvider provider = 1;
+
+ // ProviderConfig is the configuration passed to BackupBucket resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // SecretRef is a reference to a secret that contains the credentials to access object store.
+ optional k8s.io.api.core.v1.SecretReference secretRef = 3;
+
+ // Seed holds the name of the seed allocated to BackupBucket for running controller.
+ // +optional
+ optional string seed = 4;
+}
+
+// BackupBucketStatus holds the most recently observed status of the Backup Bucket.
+message BackupBucketStatus {
+ // ProviderStatus is the configuration passed to BackupBucket resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerStatus = 1;
+
+ // LastOperation holds information about the last operation on the BackupBucket.
+ // +optional
+ optional LastOperation lastOperation = 2;
+
+ // LastError holds information about the last occurred error during an operation.
+ // +optional
+ optional LastError lastError = 3;
+
+ // ObservedGeneration is the most recent generation observed for this BackupBucket. It corresponds to the
+ // BackupBucket's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 4;
+
+  // GeneratedSecretRef is a reference to the secret generated by the backup bucket, which
+ // will have object store specific credentials.
+ // +optional
+ optional k8s.io.api.core.v1.SecretReference generatedSecretRef = 5;
+}
+
+// BackupEntry holds details about shoot backup.
+message BackupEntry {
+ // Standard object metadata.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec contains the specification of the Backup Entry.
+ // +optional
+ optional BackupEntrySpec spec = 2;
+
+ // Status contains the most recently observed status of the Backup Entry.
+ // +optional
+ optional BackupEntryStatus status = 3;
+}
+
+// BackupEntryList is a list of BackupEntry objects.
+message BackupEntryList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of BackupEntry.
+ repeated BackupEntry items = 2;
+}
+
+// BackupEntrySpec is the specification of a Backup Entry.
+message BackupEntrySpec {
+ // BucketName is the name of backup bucket for this Backup Entry.
+ optional string bucketName = 1;
+
+ // Seed holds the name of the seed allocated to BackupEntry for running controller.
+ // +optional
+ optional string seed = 2;
+}
+
+// BackupEntryStatus holds the most recently observed status of the Backup Entry.
+message BackupEntryStatus {
+ // LastOperation holds information about the last operation on the BackupEntry.
+ // +optional
+ optional LastOperation lastOperation = 1;
+
+ // LastError holds information about the last occurred error during an operation.
+ // +optional
+ optional LastError lastError = 2;
+
+ // ObservedGeneration is the most recent generation observed for this BackupEntry. It corresponds to the
+ // BackupEntry's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 3;
+}
+
+// CRI contains information about the Container Runtimes.
+message CRI {
+ // The name of the CRI library
+ optional string name = 1;
+
+ // ContainerRuntimes is the list of the required container runtimes supported for a worker pool.
+ // +optional
+ repeated ContainerRuntime containerRuntimes = 2;
+}
+
+// CloudInfo contains information about the cloud
+message CloudInfo {
+ // Type is the cloud type
+ optional string type = 1;
+
+ // Region is the cloud region
+ optional string region = 2;
+}
+
+// CloudProfile represents certain properties about a provider environment.
+message CloudProfile {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the provider environment properties.
+ // +optional
+ optional CloudProfileSpec spec = 2;
+}
+
+// CloudProfileList is a collection of CloudProfiles.
+message CloudProfileList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of CloudProfiles.
+ repeated CloudProfile items = 2;
+}
+
+// CloudProfileSpec is the specification of a CloudProfile.
+// It must contain exactly one of its defined keys.
+message CloudProfileSpec {
+ // CABundle is a certificate bundle which will be installed onto every host machine of shoot cluster targeting this profile.
+ // +optional
+ optional string caBundle = 1;
+
+ // Kubernetes contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification.
+ optional KubernetesSettings kubernetes = 2;
+
+ // MachineImages contains constraints regarding allowed values for machine images in the Shoot specification.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated MachineImage machineImages = 3;
+
+ // MachineTypes contains constraints regarding allowed values for machine types in the 'workers' block in the Shoot specification.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated MachineType machineTypes = 4;
+
+ // ProviderConfig contains provider-specific configuration for the profile.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 5;
+
+ // Regions contains constraints regarding allowed values for regions and zones.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated Region regions = 6;
+
+ // SeedSelector contains an optional list of labels on `Seed` resources that marks those seeds whose shoots may use this provider profile.
+ // An empty list means that all seeds of the same provider type are supported.
+ // This is useful for environments that are of the same type (like openstack) but may have different "instances"/landscapes.
+ // Optionally a list of possible providers can be added to enable cross-provider scheduling. By default, the provider
+ // type of the seed must match the shoot's provider.
+ // +optional
+ optional SeedSelector seedSelector = 7;
+
+ // Type is the name of the provider.
+ optional string type = 8;
+
+ // VolumeTypes contains constraints regarding allowed values for volume types in the 'workers' block in the Shoot specification.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ repeated VolumeType volumeTypes = 9;
+}
+
+// ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler.
+message ClusterAutoscaler {
+ // ScaleDownDelayAfterAdd defines how long after scale up that scale down evaluation resumes (default: 1 hour).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scaleDownDelayAfterAdd = 1;
+
+  // ScaleDownDelayAfterDelete defines how long after node deletion scale down evaluation resumes (defaults to ScanInterval).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scaleDownDelayAfterDelete = 2;
+
+  // ScaleDownDelayAfterFailure defines how long after a scale down failure scale down evaluation resumes (default: 3 mins).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scaleDownDelayAfterFailure = 3;
+
+ // ScaleDownUnneededTime defines how long a node should be unneeded before it is eligible for scale down (default: 30 mins).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scaleDownUnneededTime = 4;
+
+ // ScaleDownUtilizationThreshold defines the threshold in % under which a node is being removed
+ // +optional
+ optional double scaleDownUtilizationThreshold = 5;
+
+  // ScanInterval defines how often the cluster is reevaluated for scale up or down (default: 10 secs).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scanInterval = 6;
+}
+
+// ClusterInfo contains information about the Plant cluster
+message ClusterInfo {
+ // Cloud describes the cloud information
+ optional CloudInfo cloud = 1;
+
+ // Kubernetes describes kubernetes meta information (e.g., version)
+ optional KubernetesInfo kubernetes = 2;
+}
+
+// Condition holds the information about the state of a resource.
+message Condition {
+ // Type of the Shoot condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // Last time the condition transitioned from one status to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // Last time the condition was updated.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4;
+
+ // The reason for the condition's last transition.
+ optional string reason = 5;
+
+ // A human readable message indicating details about the transition.
+ optional string message = 6;
+
+ // Well-defined error codes in case the condition reports a problem.
+ // +optional
+ repeated string codes = 7;
+}
+
+// ContainerRuntime contains information about worker's available container runtime
+message ContainerRuntime {
+ // Type is the type of the Container Runtime.
+  optional string type = 1;
+
+ // ProviderConfig is the configuration passed to the ContainerRuntime resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+}
+
+// ControllerDeployment contains information for how this controller is deployed.
+message ControllerDeployment {
+ // Type is the deployment type.
+ optional string type = 1;
+
+ // ProviderConfig contains type-specific configuration.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // Policy controls how the controller is deployed. It defaults to 'OnDemand'.
+ // +optional
+ optional string policy = 3;
+
+ // SeedSelector contains an optional label selector for seeds. Only if the labels match then this controller will be
+ // considered for a deployment.
+ // An empty list means that all seeds are selected.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector seedSelector = 4;
+}
+
+// ControllerInstallation represents an installation request for an external controller.
+message ControllerInstallation {
+ // Standard object metadata.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec contains the specification of this installation.
+ optional ControllerInstallationSpec spec = 2;
+
+ // Status contains the status of this installation.
+ optional ControllerInstallationStatus status = 3;
+}
+
+// ControllerInstallationList is a collection of ControllerInstallations.
+message ControllerInstallationList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of ControllerInstallations.
+ repeated ControllerInstallation items = 2;
+}
+
+// ControllerInstallationSpec is the specification of a ControllerInstallation.
+message ControllerInstallationSpec {
+  // RegistrationRef is used to reference a ControllerRegistration resource.
+ optional k8s.io.api.core.v1.ObjectReference registrationRef = 1;
+
+  // SeedRef is used to reference a Seed resource.
+ optional k8s.io.api.core.v1.ObjectReference seedRef = 2;
+}
+
+// ControllerInstallationStatus is the status of a ControllerInstallation.
+message ControllerInstallationStatus {
+  // Conditions represents the latest available observations of a ControllerInstallation's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ repeated Condition conditions = 1;
+
+ // ProviderStatus contains type-specific status.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerStatus = 2;
+}
+
+// ControllerRegistration represents a registration of an external controller.
+message ControllerRegistration {
+ // Standard object metadata.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec contains the specification of this registration.
+ optional ControllerRegistrationSpec spec = 2;
+}
+
+// ControllerRegistrationList is a collection of ControllerRegistrations.
+message ControllerRegistrationList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of ControllerRegistrations.
+ repeated ControllerRegistration items = 2;
+}
+
+// ControllerRegistrationSpec is the specification of a ControllerRegistration.
+message ControllerRegistrationSpec {
+ // Resources is a list of combinations of kinds (DNSProvider, Infrastructure, Generic, ...) and their actual types
+ // (aws-route53, gcp, auditlog, ...).
+ // +optional
+ repeated ControllerResource resources = 1;
+
+ // Deployment contains information for how this controller is deployed.
+ // +optional
+ optional ControllerDeployment deployment = 2;
+}
+
+// ControllerResource is a combination of a kind (DNSProvider, Infrastructure, Generic, ...) and the actual type for this
+// kind (aws-route53, gcp, auditlog, ...).
+message ControllerResource {
+ // Kind is the resource kind, for example "OperatingSystemConfig".
+ optional string kind = 1;
+
+ // Type is the resource type, for example "coreos" or "ubuntu".
+ optional string type = 2;
+
+ // GloballyEnabled determines if this ControllerResource is required by all Shoot clusters.
+ // +optional
+ optional bool globallyEnabled = 3;
+
+ // ReconcileTimeout defines how long Gardener should wait for the resource reconciliation.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration reconcileTimeout = 4;
+
+ // Primary determines if the controller backed by this ControllerRegistration is responsible for the extension
+ // resource's lifecycle. This field defaults to true. There must be exactly one primary controller for this kind/type
+ // combination.
+ // +optional
+ optional bool primary = 5;
+}
+
+// DNS holds information about the provider, the hosted zone id and the domain.
+message DNS {
+ // Domain is the external available domain of the Shoot cluster. This domain will be written into the
+ // kubeconfig that is handed out to end-users. Once set it is immutable.
+ // +optional
+ optional string domain = 1;
+
+  // Providers is a list of DNS providers that shall be enabled for this shoot cluster. Only relevant if
+  // a default domain is not used.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ repeated DNSProvider providers = 2;
+}
+
+message DNSIncludeExclude {
+ // Include is a list of resources that shall be included.
+ // +optional
+ repeated string include = 1;
+
+ // Exclude is a list of resources that shall be excluded.
+ // +optional
+ repeated string exclude = 2;
+}
+
+// DNSProvider contains information about a DNS provider.
+message DNSProvider {
+ // Domains contains information about which domains shall be included/excluded for this provider.
+ // +optional
+ optional DNSIncludeExclude domains = 1;
+
+ // Primary indicates that this DNSProvider is used for shoot related domains.
+ // +optional
+ optional bool primary = 2;
+
+ // SecretName is a name of a secret containing credentials for the stated domain and the
+ // provider. When not specified, the Gardener will use the cloud provider credentials referenced
+ // by the Shoot and try to find respective credentials there (primary provider only). Specifying this field may override
+ // this behavior, i.e. forcing the Gardener to only look into the given secret.
+ // +optional
+ optional string secretName = 3;
+
+ // Type is the DNS provider type.
+ // +optional
+ optional string type = 4;
+
+ // Zones contains information about which hosted zones shall be included/excluded for this provider.
+ // +optional
+ optional DNSIncludeExclude zones = 5;
+}
+
+// DataVolume contains information about a data volume.
+message DataVolume {
+  // Name of the volume to make it referenceable.
+ optional string name = 1;
+
+ // Type is the type of the volume.
+ // +optional
+ optional string type = 2;
+
+ // VolumeSize is the size of the volume.
+ optional string size = 3;
+
+ // Encrypted determines if the volume should be encrypted.
+ // +optional
+ optional bool encrypted = 4;
+}
+
+// Endpoint is an endpoint for monitoring, logging and other services around the plant.
+message Endpoint {
+ // Name is the name of the endpoint
+ optional string name = 1;
+
+ // URL is the url of the endpoint
+ optional string url = 2;
+
+ // Purpose is the purpose of the endpoint
+ optional string purpose = 3;
+}
+
+// ExpirableVersion contains a version and an expiration date.
+message ExpirableVersion {
+ // Version is the version identifier.
+ optional string version = 1;
+
+ // ExpirationDate defines the time at which this version expires.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationDate = 2;
+
+ // Classification defines the state of a version (preview, supported, deprecated)
+ // +optional
+ optional string classification = 3;
+}
+
+// Extension contains type and provider information for Shoot extensions.
+message Extension {
+ // Type is the type of the extension resource.
+ optional string type = 1;
+
+ // ProviderConfig is the configuration passed to extension resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+  // Disabled allows disabling extensions that were marked as 'globally enabled' by Gardener administrators.
+ // +optional
+ optional bool disabled = 3;
+}
+
+// ExtensionResourceState contains the kind of the extension custom resource and its last observed state in the Shoot's
+// namespace on the Seed cluster.
+message ExtensionResourceState {
+ // Kind (type) of the extension custom resource
+ optional string kind = 1;
+
+ // Name of the extension custom resource
+ // +optional
+ optional string name = 2;
+
+ // Purpose of the extension custom resource
+ // +optional
+ optional string purpose = 3;
+
+ // State of the extension resource
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension state = 4;
+
+ // Resources holds a list of named resource references that can be referred to in the state by their names.
+ // +optional
+ repeated github.com.gardener.gardener.pkg.apis.core.v1beta1.NamedResourceReference resources = 5;
+}
+
+// Gardener holds the information about the Gardener version that operated a resource.
+message Gardener {
+ // ID is the Docker container id of the Gardener which last acted on a resource.
+ optional string id = 1;
+
+ // Name is the hostname (pod name) of the Gardener which last acted on a resource.
+ optional string name = 2;
+
+ // Version is the version of the Gardener which last acted on a resource.
+ optional string version = 3;
+}
+
+// GardenerResourceData holds the data which is used to generate resources, deployed in the Shoot's control plane.
+message GardenerResourceData {
+ // Name of the object required to generate resources
+ optional string name = 1;
+
+ // Type of the object
+ optional string type = 2;
+
+ // Data contains the payload required to generate resources
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 3;
+}
+
+// Hibernation contains information whether the Shoot is suspended or not.
+message Hibernation {
+ // Enabled specifies whether the Shoot needs to be hibernated or not. If it is true, the Shoot's desired state is to be hibernated.
+  // If it is false or nil, the Shoot's desired state is to be awake.
+ // +optional
+ optional bool enabled = 1;
+
+ // Schedules determine the hibernation schedules.
+ // +optional
+ repeated HibernationSchedule schedules = 2;
+}
+
+// HibernationSchedule determines the hibernation schedule of a Shoot.
+// A Shoot will be regularly hibernated at each start time and will be woken up at each end time.
+// Start or End can be omitted, though at least one of them has to be specified.
+message HibernationSchedule {
+ // Start is a Cron spec at which time a Shoot will be hibernated.
+ // +optional
+ optional string start = 1;
+
+ // End is a Cron spec at which time a Shoot will be woken up.
+ // +optional
+ optional string end = 2;
+
+  // Location is the time location in which both start and end shall be evaluated.
+ // +optional
+ optional string location = 3;
+}
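
As a concrete illustration of the HibernationSchedule message above, the sketch below constructs one value using the corresponding Go types from the vendored `github.com/gardener/gardener/pkg/apis/core/v1alpha1` package. The cron expressions, the time zone, and the `ptr` helper are illustrative assumptions and do not come from this patch.

```go
package main

import (
	"fmt"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

// ptr is a small illustrative helper for the optional (pointer) string fields.
func ptr(s string) *string { return &s }

func main() {
	// Hibernate the Shoot every weekday evening and wake it up each weekday morning.
	schedule := gardencorev1alpha1.HibernationSchedule{
		Start:    ptr("00 19 * * 1,2,3,4,5"), // cron spec: hibernate at 19:00, Mon-Fri
		End:      ptr("00 07 * * 1,2,3,4,5"), // cron spec: wake up at 07:00, Mon-Fri
		Location: ptr("Europe/Berlin"),       // time zone in which both specs are evaluated
	}
	fmt.Printf("%+v\n", schedule)
}
```

Either Start or End may be left nil, matching the message comment that at least one of them has to be specified.
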
+
+// HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.
+// Note: Descriptions were taken from the Kubernetes documentation.
+message HorizontalPodAutoscalerConfig {
+ // The period after which a ready pod transition is considered to be the first.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration cpuInitializationPeriod = 1;
+
+ // The period since last downscale, before another downscale can be performed in horizontal pod autoscaler.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration downscaleDelay = 2;
+
+ // The configurable window at which the controller will choose the highest recommendation for autoscaling.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration downscaleStabilization = 3;
+
+ // The configurable period at which the horizontal pod autoscaler considers a Pod “not yet ready” given that it’s unready and it has transitioned to unready during that time.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration initialReadinessDelay = 4;
+
+ // The period for syncing the number of pods in horizontal pod autoscaler.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration syncPeriod = 5;
+
+ // The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.
+ // +optional
+ optional double tolerance = 6;
+
+ // The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration upscaleDelay = 7;
+}
+
+// Ingress configures the Ingress specific settings of the Seed cluster.
+message Ingress {
+ // Domain specifies the IngressDomain of the Seed cluster pointing to the ingress controller endpoint. It will be used
+ // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable.
+ optional string domain = 1;
+
+ // Controller configures a Gardener managed Ingress Controller listening on the ingressDomain
+ optional IngressController controller = 2;
+}
+
+// IngressController enables a Gardener managed Ingress Controller listening on the ingressDomain
+message IngressController {
+ // Kind defines which kind of IngressController to use, for example `nginx`
+ optional string kind = 1;
+
+ // ProviderConfig specifies infrastructure specific configuration for the ingressController
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+}
+
+// KubeAPIServerConfig contains configuration settings for the kube-apiserver.
+message KubeAPIServerConfig {
+ optional KubernetesConfig kubernetesConfig = 1;
+
+ // AdmissionPlugins contains the list of user-defined admission plugins (additional to those managed by Gardener), and, if desired, the corresponding
+ // configuration.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ repeated AdmissionPlugin admissionPlugins = 2;
+
+ // APIAudiences are the identifiers of the API. The service account token authenticator will
+ // validate that tokens used against the API are bound to at least one of these audiences.
+ // Defaults to ["kubernetes"].
+ // +optional
+ repeated string apiAudiences = 3;
+
+ // AuditConfig contains configuration settings for the audit of the kube-apiserver.
+ // +optional
+ optional AuditConfig auditConfig = 4;
+
+ // EnableBasicAuthentication defines whether basic authentication should be enabled for this cluster or not.
+ // +optional
+ optional bool enableBasicAuthentication = 5;
+
+ // OIDCConfig contains configuration settings for the OIDC provider.
+ // +optional
+ optional OIDCConfig oidcConfig = 6;
+
+ // RuntimeConfig contains information about enabled or disabled APIs.
+ // +optional
+  map<string, bool> runtimeConfig = 7;
+
+ // ServiceAccountConfig contains configuration settings for the service account handling
+ // of the kube-apiserver.
+ // +optional
+ optional ServiceAccountConfig serviceAccountConfig = 8;
+
+ // WatchCacheSizes contains configuration of the API server's watch cache sizes.
+ // Configuring these flags might be useful for large-scale Shoot clusters with a lot of parallel update requests
+ // and a lot of watching controllers (e.g. large shooted Seed clusters). When the API server's watch cache's
+ // capacity is too small to cope with the amount of update requests and watchers for a particular resource, it
+ // might happen that controller watches are permanently stopped with `too old resource version` errors.
+ // Starting from kubernetes v1.19, the API server's watch cache size is adapted dynamically and setting the watch
+ // cache size flags will have no effect, except when setting it to 0 (which disables the watch cache).
+ // +optional
+ optional WatchCacheSizes watchCacheSizes = 9;
+
+ // Requests contains configuration for request-specific settings for the kube-apiserver.
+ // +optional
+ optional KubeAPIServerRequests requests = 10;
+}
+
+// KubeAPIServerRequests contains configuration for request-specific settings for the kube-apiserver.
+message KubeAPIServerRequests {
+ // MaxNonMutatingInflight is the maximum number of non-mutating requests in flight at a given time. When the server
+ // exceeds this, it rejects requests.
+ // +optional
+ optional int32 maxNonMutatingInflight = 1;
+
+ // MaxMutatingInflight is the maximum number of mutating requests in flight at a given time. When the server
+ // exceeds this, it rejects requests.
+ // +optional
+ optional int32 maxMutatingInflight = 2;
+}
+
+// KubeControllerManagerConfig contains configuration settings for the kube-controller-manager.
+message KubeControllerManagerConfig {
+ optional KubernetesConfig kubernetesConfig = 1;
+
+ // HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.
+ // +optional
+ optional HorizontalPodAutoscalerConfig horizontalPodAutoscaler = 2;
+
+ // NodeCIDRMaskSize defines the mask size for node cidr in cluster (default is 24)
+ // +optional
+ optional int32 nodeCIDRMaskSize = 3;
+
+ // PodEvictionTimeout defines the grace period for deleting pods on failed nodes. Defaults to 2m.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration podEvictionTimeout = 4;
+}
+
+// KubeProxyConfig contains configuration settings for the kube-proxy.
+message KubeProxyConfig {
+ optional KubernetesConfig kubernetesConfig = 1;
+
+ // Mode specifies which proxy mode to use.
+ // defaults to IPTables.
+ // +optional
+ optional string mode = 2;
+}
+
+// KubeSchedulerConfig contains configuration settings for the kube-scheduler.
+message KubeSchedulerConfig {
+ optional KubernetesConfig kubernetesConfig = 1;
+
+ // KubeMaxPDVols allows to configure the `KUBE_MAX_PD_VOLS` environment variable for the kube-scheduler.
+ // Please find more information here: https://kubernetes.io/docs/concepts/storage/storage-limits/#custom-limits
+  // Note that using this field is considered alpha-/experimental-level and is at your own risk. You should be aware
+ // of all the side-effects and consequences when changing it.
+ // +optional
+ optional string kubeMaxPDVols = 2;
+}
+
+// KubeletConfig contains configuration settings for the kubelet.
+message KubeletConfig {
+ optional KubernetesConfig kubernetesConfig = 1;
+
+ // CPUCFSQuota allows you to disable/enable CPU throttling for Pods.
+ // +optional
+ optional bool cpuCFSQuota = 2;
+
+  // CPUManagerPolicy allows setting alternative CPU management policies (default: none).
+ // +optional
+ optional string cpuManagerPolicy = 3;
+
+ // EvictionHard describes a set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a Pod eviction.
+ // +optional
+ // Default:
+ // memory.available: "100Mi/1Gi/5%"
+ // nodefs.available: "5%"
+ // nodefs.inodesFree: "5%"
+ // imagefs.available: "5%"
+ // imagefs.inodesFree: "5%"
+ optional KubeletConfigEviction evictionHard = 4;
+
+ // EvictionMaxPodGracePeriod describes the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ // +optional
+ // Default: 90
+ optional int32 evictionMaxPodGracePeriod = 5;
+
+ // EvictionMinimumReclaim configures the amount of resources below the configured eviction threshold that the kubelet attempts to reclaim whenever the kubelet observes resource pressure.
+ // +optional
+ // Default: 0 for each resource
+ optional KubeletConfigEvictionMinimumReclaim evictionMinimumReclaim = 6;
+
+ // EvictionPressureTransitionPeriod is the duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.
+ // +optional
+ // Default: 4m0s
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration evictionPressureTransitionPeriod = 7;
+
+ // EvictionSoft describes a set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a Pod eviction.
+ // +optional
+ // Default:
+ // memory.available: "200Mi/1.5Gi/10%"
+ // nodefs.available: "10%"
+ // nodefs.inodesFree: "10%"
+ // imagefs.available: "10%"
+ // imagefs.inodesFree: "10%"
+ optional KubeletConfigEviction evictionSoft = 8;
+
+ // EvictionSoftGracePeriod describes a set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a Pod eviction.
+ // +optional
+ // Default:
+ // memory.available: 1m30s
+ // nodefs.available: 1m30s
+ // nodefs.inodesFree: 1m30s
+ // imagefs.available: 1m30s
+ // imagefs.inodesFree: 1m30s
+ optional KubeletConfigEvictionSoftGracePeriod evictionSoftGracePeriod = 9;
+
+ // MaxPods is the maximum number of Pods that are allowed by the Kubelet.
+ // +optional
+ // Default: 110
+ optional int32 maxPods = 10;
+
+ // PodPIDsLimit is the maximum number of process IDs per pod allowed by the kubelet.
+ // +optional
+ optional int64 podPidsLimit = 11;
+
+ // ImagePullProgressDeadline describes the time limit under which if no pulling progress is made, the image pulling will be cancelled.
+ // +optional
+ // Default: 1m
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration imagePullProgressDeadline = 12;
+
+ // FailSwapOn makes the Kubelet fail to start if swap is enabled on the node. (default true).
+ // +optional
+ optional bool failSwapOn = 13;
+
+ // KubeReserved is the configuration for resources reserved for kubernetes node components (mainly kubelet and container runtime).
+ // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied.
+ // +optional
+ // Default: cpu=80m,memory=1Gi,pid=20k
+ optional KubeletConfigReserved kubeReserved = 14;
+
+ // SystemReserved is the configuration for resources reserved for system processes not managed by kubernetes (e.g. journald).
+ // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied.
+ // +optional
+ optional KubeletConfigReserved systemReserved = 15;
+}
+
+// KubeletConfigEviction contains kubelet eviction thresholds supporting either a resource.Quantity or a percentage based value.
+message KubeletConfigEviction {
+ // MemoryAvailable is the threshold for the free memory on the host server.
+ // +optional
+ optional string memoryAvailable = 1;
+
+ // ImageFSAvailable is the threshold for the free disk space in the imagefs filesystem (docker images and container writable layers).
+ // +optional
+ optional string imageFSAvailable = 2;
+
+ // ImageFSInodesFree is the threshold for the available inodes in the imagefs filesystem.
+ // +optional
+ optional string imageFSInodesFree = 3;
+
+ // NodeFSAvailable is the threshold for the free disk space in the nodefs filesystem (docker volumes, logs, etc).
+ // +optional
+ optional string nodeFSAvailable = 4;
+
+ // NodeFSInodesFree is the threshold for the available inodes in the nodefs filesystem.
+ // +optional
+ optional string nodeFSInodesFree = 5;
+}
+
+// KubeletConfigEvictionMinimumReclaim contains configuration for the kubelet eviction minimum reclaim.
+message KubeletConfigEvictionMinimumReclaim {
+ // MemoryAvailable is the threshold for the memory reclaim on the host server.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity memoryAvailable = 1;
+
+ // ImageFSAvailable is the threshold for the disk space reclaim in the imagefs filesystem (docker images and container writable layers).
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity imageFSAvailable = 2;
+
+ // ImageFSInodesFree is the threshold for the inodes reclaim in the imagefs filesystem.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity imageFSInodesFree = 3;
+
+ // NodeFSAvailable is the threshold for the disk space reclaim in the nodefs filesystem (docker volumes, logs, etc).
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity nodeFSAvailable = 4;
+
+ // NodeFSInodesFree is the threshold for the inodes reclaim in the nodefs filesystem.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity nodeFSInodesFree = 5;
+}
+
+// KubeletConfigEvictionSoftGracePeriod contains grace periods for kubelet eviction thresholds.
+message KubeletConfigEvictionSoftGracePeriod {
+ // MemoryAvailable is the grace period for the MemoryAvailable eviction threshold.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration memoryAvailable = 1;
+
+ // ImageFSAvailable is the grace period for the ImageFSAvailable eviction threshold.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration imageFSAvailable = 2;
+
+ // ImageFSInodesFree is the grace period for the ImageFSInodesFree eviction threshold.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration imageFSInodesFree = 3;
+
+ // NodeFSAvailable is the grace period for the NodeFSAvailable eviction threshold.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration nodeFSAvailable = 4;
+
+ // NodeFSInodesFree is the grace period for the NodeFSInodesFree eviction threshold.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration nodeFSInodesFree = 5;
+}
+
+// KubeletConfigReserved contains reserved resources for daemons
+message KubeletConfigReserved {
+ // CPU is the reserved cpu.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity cpu = 1;
+
+ // Memory is the reserved memory.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity memory = 2;
+
+ // EphemeralStorage is the reserved ephemeral-storage.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity ephemeralStorage = 3;
+
+ // PID is the reserved process-ids.
+ // To reserve PID, the SupportNodePidsLimit feature gate must be enabled in Kubernetes versions < 1.15.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity pid = 4;
+}
+
+// Kubernetes contains the version and configuration variables for the Shoot control plane.
+message Kubernetes {
+ // AllowPrivilegedContainers indicates whether privileged containers are allowed in the Shoot (default: true).
+ // +optional
+ optional bool allowPrivilegedContainers = 1;
+
+ // ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler.
+ // +optional
+ optional ClusterAutoscaler clusterAutoscaler = 2;
+
+ // KubeAPIServer contains configuration settings for the kube-apiserver.
+ // +optional
+ optional KubeAPIServerConfig kubeAPIServer = 3;
+
+ // KubeControllerManager contains configuration settings for the kube-controller-manager.
+ // +optional
+ optional KubeControllerManagerConfig kubeControllerManager = 4;
+
+ // KubeScheduler contains configuration settings for the kube-scheduler.
+ // +optional
+ optional KubeSchedulerConfig kubeScheduler = 5;
+
+ // KubeProxy contains configuration settings for the kube-proxy.
+ // +optional
+ optional KubeProxyConfig kubeProxy = 6;
+
+ // Kubelet contains configuration settings for the kubelet.
+ // +optional
+ optional KubeletConfig kubelet = 7;
+
+ // Version is the semantic Kubernetes version to use for the Shoot cluster.
+ optional string version = 8;
+
+ // VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler.
+ // +optional
+ optional VerticalPodAutoscaler verticalPodAutoscaler = 9;
+}
+
+// KubernetesConfig contains common configuration fields for the control plane components.
+message KubernetesConfig {
+ // FeatureGates contains information about enabled feature gates.
+ // +optional
+  map<string, bool> featureGates = 1;
+}
+
+// KubernetesDashboard describes configuration values for the kubernetes-dashboard addon.
+message KubernetesDashboard {
+ optional Addon addon = 2;
+
+ // AuthenticationMode defines the authentication mode for the kubernetes-dashboard.
+ // +optional
+ optional string authenticationMode = 1;
+}
+
+// KubernetesInfo contains the version and configuration variables for the Plant cluster.
+message KubernetesInfo {
+ // Version is the semantic Kubernetes version to use for the Plant cluster.
+ optional string version = 1;
+}
+
+// KubernetesSettings contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification.
+message KubernetesSettings {
+ // Versions is the list of allowed Kubernetes versions with optional expiration dates for Shoot clusters.
+ // +patchMergeKey=version
+ // +patchStrategy=merge
+ // +optional
+ repeated ExpirableVersion versions = 1;
+}
+
+// LastError indicates the last occurred error for an operation on a resource.
+message LastError {
+ // A human readable message indicating details about the last error.
+ optional string description = 1;
+
+ // ID of the task which caused this last error
+ // +optional
+ optional string taskID = 2;
+
+ // Well-defined error codes of the last error(s).
+ // +optional
+ repeated string codes = 3;
+
+ // Last time the error was reported
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4;
+}
+
+// LastOperation indicates the type and the state of the last operation, along with a description
+// message and a progress indicator.
+message LastOperation {
+ // A human readable message indicating details about the last operation.
+ optional string description = 1;
+
+ // Last time the operation state transitioned from one to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 2;
+
+ // The progress in percentage (0-100) of the last operation.
+ optional int32 progress = 3;
+
+ // Status of the last operation, one of Aborted, Processing, Succeeded, Error, Failed.
+ optional string state = 4;
+
+ // Type of the last operation, one of Create, Reconcile, Delete.
+ optional string type = 5;
+}
+
+// Machine contains information about the machine type and image.
+message Machine {
+ // Type is the machine type of the worker group.
+ optional string type = 1;
+
+ // Image holds information about the machine image to use for all nodes of this pool. It will default to the
+ // latest version of the first image stated in the referenced CloudProfile if no value has been provided.
+ // +optional
+ optional ShootMachineImage image = 2;
+}
+
+// MachineControllerManagerSettings contains configurations for different worker-pools, e.g. MachineDrainTimeout, MachineHealthTimeout.
+message MachineControllerManagerSettings {
+  // MachineDrainTimeout is the period after which a machine is forcefully deleted.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration machineDrainTimeout = 1;
+
+  // MachineHealthTimeout is the period after which a machine is declared failed.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration machineHealthTimeout = 2;
+
+ // MachineCreationTimeout is the period after which creation of the machine is declared failed.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration machineCreationTimeout = 3;
+
+  // MaxEvictRetries is the number of eviction retries on a pod after which drain is declared failed, and forceful deletion is triggered.
+ // +optional
+ optional int32 maxEvictRetries = 4;
+
+  // NodeConditions is the set of node conditions which, if any of them is true for the period of MachineHealthTimeout, cause the machine to be declared failed.
+ // +optional
+ repeated string nodeConditions = 5;
+}
+
+// MachineImage defines the name and multiple versions of the machine image in any environment.
+message MachineImage {
+ // Name is the name of the image.
+ optional string name = 1;
+
+ // Versions contains versions, expiration dates and container runtimes of the machine image
+ // +patchMergeKey=version
+ // +patchStrategy=merge
+ repeated MachineImageVersion versions = 2;
+}
+
+// MachineImageVersion is an expirable version with list of supported container runtimes and interfaces
+message MachineImageVersion {
+ optional ExpirableVersion expirableVersion = 1;
+
+ // CRI list of supported container runtime and interfaces supported by this version
+ // +optional
+ repeated CRI cri = 2;
+}
+
+// MachineType contains certain properties of a machine type.
+message MachineType {
+ // CPU is the number of CPUs for this machine type.
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity cpu = 1;
+
+ // GPU is the number of GPUs for this machine type.
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity gpu = 2;
+
+ // Memory is the amount of memory for this machine type.
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity memory = 3;
+
+ // Name is the name of the machine type.
+ optional string name = 4;
+
+ // Storage is the amount of storage associated with the root volume of this machine type.
+ // +optional
+ optional MachineTypeStorage storage = 5;
+
+ // Usable defines if the machine type can be used for shoot clusters.
+ // +optional
+ optional bool usable = 6;
+}
+
+// MachineTypeStorage is the amount of storage associated with the root volume of this machine type.
+message MachineTypeStorage {
+ // Class is the class of the storage type.
+ optional string class = 1;
+
+ // StorageSize is the storage size.
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity size = 2;
+
+ // Type is the type of the storage.
+ optional string type = 3;
+}
+
+// Maintenance contains information about the time window for maintenance operations and which
+// operations should be performed.
+message Maintenance {
+ // AutoUpdate contains information about which constraints should be automatically updated.
+ // +optional
+ optional MaintenanceAutoUpdate autoUpdate = 1;
+
+ // TimeWindow contains information about the time window for maintenance operations.
+ // +optional
+ optional MaintenanceTimeWindow timeWindow = 2;
+
+  // ConfineSpecUpdateRollout prevents changes/updates to the shoot specification from being rolled out immediately.
+  // Instead, they are rolled out during the shoot's maintenance time window. The one exception that triggers an
+  // immediate rollout is a change to the Spec.Hibernation.Enabled field.
+ // +optional
+ optional bool confineSpecUpdateRollout = 3;
+}
+
+// MaintenanceAutoUpdate contains information about which constraints should be automatically updated.
+message MaintenanceAutoUpdate {
+ // KubernetesVersion indicates whether the patch Kubernetes version may be automatically updated (default: true).
+ optional bool kubernetesVersion = 1;
+
+ // MachineImageVersion indicates whether the machine image version may be automatically updated (default: true).
+ optional bool machineImageVersion = 2;
+}
+
+// MaintenanceTimeWindow contains information about the time window for maintenance operations.
+message MaintenanceTimeWindow {
+ // Begin is the beginning of the time window in the format HHMMSS+ZONE, e.g. "220000+0100".
+ // If not present, a random value will be computed.
+ optional string begin = 1;
+
+ // End is the end of the time window in the format HHMMSS+ZONE, e.g. "220000+0100".
+ // If not present, the value will be computed based on the "Begin" value.
+ optional string end = 2;
+}
+
+// Monitoring contains information about the monitoring configuration for the shoot.
+message Monitoring {
+ // Alerting contains information about the alerting configuration for the shoot cluster.
+ // +optional
+ optional Alerting alerting = 1;
+}
+
+// NamedResourceReference is a named reference to a resource.
+message NamedResourceReference {
+ // Name of the resource reference.
+ optional string name = 1;
+
+ // ResourceRef is a reference to a resource.
+ optional k8s.io.api.autoscaling.v1.CrossVersionObjectReference resourceRef = 2;
+}
+
+// Networking defines networking parameters for the shoot cluster.
+message Networking {
+ // Type identifies the type of the networking plugin.
+ optional string type = 1;
+
+ // ProviderConfig is the configuration passed to network resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // Pods is the CIDR of the pod network.
+ // +optional
+ optional string pods = 3;
+
+ // Nodes is the CIDR of the entire node network.
+ // +optional
+ optional string nodes = 4;
+
+ // Services is the CIDR of the service network.
+ // +optional
+ optional string services = 5;
+}
+
+// NginxIngress describes configuration values for the nginx-ingress addon.
+message NginxIngress {
+ optional Addon addon = 4;
+
+ // LoadBalancerSourceRanges is a list of allowed IP sources for NginxIngress
+ // +optional
+ repeated string loadBalancerSourceRanges = 1;
+
+ // Config contains custom configuration for the nginx-ingress-controller configuration.
+ // See https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#configuration-options
+ // +optional
+ map<string, string> config = 2;
+
+ // ExternalTrafficPolicy controls the `.spec.externalTrafficPolicy` value of the load balancer `Service`
+ // exposing the nginx-ingress. Defaults to `Cluster`.
+ // +optional
+ optional string externalTrafficPolicy = 3;
+}
+
+// OIDCConfig contains configuration settings for the OIDC provider.
+// Note: Descriptions were taken from the Kubernetes documentation.
+message OIDCConfig {
+ // If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.
+ // +optional
+ optional string caBundle = 1;
+
+ // ClientAuthentication can optionally contain client configuration used for kubeconfig generation.
+ // +optional
+ optional OpenIDConnectClientAuthentication clientAuthentication = 2;
+
+ // The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.
+ // +optional
+ optional string clientID = 3;
+
+ // If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details.
+ // +optional
+ optional string groupsClaim = 4;
+
+ // If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.
+ // +optional
+ optional string groupsPrefix = 5;
+
+ // The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).
+ // +optional
+ optional string issuerURL = 6;
+
+ // ATTENTION: Only meaningful for Kubernetes >= 1.11
+ // key=value pairs that describe a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value.
+ // +optional
+ map<string, string> requiredClaims = 7;
+
+ // List of allowed JOSE asymmetric signing algorithms. JWTs with an 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1
+ // +optional
+ repeated string signingAlgs = 8;
+
+ // The OpenID claim to use as the user name. Note that claims other than the default ('sub') are not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub")
+ // +optional
+ optional string usernameClaim = 9;
+
+ // If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'.
+ // +optional
+ optional string usernamePrefix = 10;
+}
+
+// OpenIDConnectClientAuthentication contains configuration for OIDC clients.
+message OpenIDConnectClientAuthentication {
+ // Extra configuration added to kubeconfig's auth-provider.
+ // Must not be any of idp-issuer-url, client-id, client-secret, idp-certificate-authority, idp-certificate-authority-data, id-token or refresh-token
+ // +optional
+ map<string, string> extraConfig = 1;
+
+ // The client Secret for the OpenID Connect client.
+ // +optional
+ optional string secret = 2;
+}
+
+message Plant {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec contains the specification of this Plant.
+ optional PlantSpec spec = 2;
+
+ // Status contains the status of this Plant.
+ optional PlantStatus status = 3;
+}
+
+// PlantList is a collection of Plants.
+message PlantList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Plants.
+ repeated Plant items = 2;
+}
+
+// PlantSpec is the specification of a Plant.
+message PlantSpec {
+ // SecretRef is a reference to a Secret object containing the Kubeconfig of the external kubernetes
+ // clusters to be added to Gardener.
+ optional k8s.io.api.core.v1.LocalObjectReference secretRef = 1;
+
+ // Endpoints is the configuration of the plant endpoints
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ repeated Endpoint endpoints = 2;
+}
+
+// PlantStatus is the status of a Plant.
+message PlantStatus {
+ // Conditions represents the latest available observations of a Plant's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ repeated Condition conditions = 1;
+
+ // ObservedGeneration is the most recent generation observed for this Plant. It corresponds to the
+ // Plant's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 2;
+
+ // ClusterInfo is additional computed information about the newly added cluster (Plant)
+ optional ClusterInfo clusterInfo = 3;
+}
+
+// Project holds certain properties about a Gardener project.
+message Project {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the project properties.
+ // +optional
+ optional ProjectSpec spec = 2;
+
+ // Most recently observed status of the Project.
+ // +optional
+ optional ProjectStatus status = 3;
+}
+
+// ProjectList is a collection of Projects.
+message ProjectList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Projects.
+ repeated Project items = 2;
+}
+
+// ProjectMember is a member of a project.
+message ProjectMember {
+ // Subject is representing a user name, an email address, or any other identifier of a user, group, or service
+ // account that has a certain role.
+ optional k8s.io.api.rbac.v1.Subject subject = 1;
+
+ // Role represents the role of this member.
+ // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `roles`
+ // list.
+ // TODO: Remove this field in favor of the `roles` list in `v1`.
+ optional string role = 2;
+
+ // Roles represents the list of roles of this member.
+ // +optional
+ repeated string roles = 3;
+}
+
+// ProjectSpec is the specification of a Project.
+message ProjectSpec {
+ // CreatedBy is a subject representing a user name, an email address, or any other identifier of a user
+ // who created the project.
+ // +optional
+ optional k8s.io.api.rbac.v1.Subject createdBy = 1;
+
+ // Description is a human-readable description of what the project is used for.
+ // +optional
+ optional string description = 2;
+
+ // Owner is a subject representing a user name, an email address, or any other identifier of a user owning
+ // the project.
+ // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `owner`
+ // role. The only way to change the owner will be by moving the `owner` role. In this API version the only way
+ // to change the owner is to use this field.
+ // +optional
+ // TODO: Remove this field in favor of the `owner` role in `v1`.
+ optional k8s.io.api.rbac.v1.Subject owner = 3;
+
+ // Purpose is a human-readable explanation of the project's purpose.
+ // +optional
+ optional string purpose = 4;
+
+ // Members is a list of subjects representing a user name, an email address, or any other identifier of a user,
+ // group, or service account that has a certain role.
+ // +optional
+ repeated ProjectMember members = 5;
+
+ // Namespace is the name of the namespace that has been created for the Project object.
+ // A nil value means that Gardener will determine the name of the namespace.
+ // +optional
+ optional string namespace = 6;
+
+ // Tolerations contains the default tolerations and a whitelist for taints on seed clusters.
+ // +optional
+ optional ProjectTolerations tolerations = 7;
+}
+
+// ProjectStatus holds the most recently observed status of the project.
+message ProjectStatus {
+ // ObservedGeneration is the most recent generation observed for this project.
+ // +optional
+ optional int64 observedGeneration = 1;
+
+ // Phase is the current phase of the project.
+ optional string phase = 2;
+
+ // StaleSinceTimestamp contains the timestamp when the project was first discovered to be stale/unused.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time staleSinceTimestamp = 3;
+
+ // StaleAutoDeleteTimestamp contains the timestamp when the project will be garbage-collected/automatically deleted
+ // because it's stale/unused.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time staleAutoDeleteTimestamp = 4;
+}
+
+// ProjectTolerations contains the tolerations for taints on seed clusters.
+message ProjectTolerations {
+ // Defaults contains a list of tolerations that are added to the shoots in this project by default.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ repeated Toleration defaults = 1;
+
+ // Whitelist contains a list of tolerations that are allowed to be added to the shoots in this project. Please note
+ // that this list may only be added by users having the `spec-tolerations-whitelist` verb for project resources.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ repeated Toleration whitelist = 2;
+}
+
+// Provider contains provider-specific information that are handed-over to the provider-specific
+// extension controller.
+message Provider {
+ // Type is the type of the provider.
+ optional string type = 1;
+
+ // ControlPlaneConfig contains the provider-specific control plane config blob. Please look up the concrete
+ // definition in the documentation of your provider extension.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension controlPlaneConfig = 2;
+
+ // InfrastructureConfig contains the provider-specific infrastructure config blob. Please look up the concrete
+ // definition in the documentation of your provider extension.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension infrastructureConfig = 3;
+
+ // Workers is a list of worker groups.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated Worker workers = 4;
+}
+
+message Quota {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the Quota constraints.
+ // +optional
+ optional QuotaSpec spec = 2;
+}
+
+// QuotaList is a collection of Quotas.
+message QuotaList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Quotas.
+ repeated Quota items = 2;
+}
+
+// QuotaSpec is the specification of a Quota.
+message QuotaSpec {
+ // ClusterLifetimeDays is the lifetime of a Shoot cluster in days before it will be terminated automatically.
+ // +optional
+ optional int32 clusterLifetimeDays = 1;
+
+ // Metrics is a list of resources which will be put under constraints.
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> metrics = 2;
+
+ // Scope is the scope of the Quota object, either 'project' or 'secret'.
+ optional k8s.io.api.core.v1.ObjectReference scope = 3;
+}
+
+// Region contains certain properties of a region.
+message Region {
+ // Name is a region name.
+ optional string name = 1;
+
+ // Zones is a list of availability zones in this region.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ repeated AvailabilityZone zones = 2;
+
+ // Labels is an optional set of key-value pairs that contain certain administrator-controlled labels for this region.
+ // It can be used by Gardener administrators/operators to provide additional information about a region, e.g. wrt
+ // quality, reliability, access restrictions, etc.
+ // +optional
+ map<string, string> labels = 3;
+}
+
+// ResourceData holds the data of a resource referred to by an extension controller state.
+message ResourceData {
+ optional k8s.io.api.autoscaling.v1.CrossVersionObjectReference ref = 1;
+
+ // Data of the resource
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
+}
+
+// ResourceWatchCacheSize contains configuration of the API server's watch cache size for one specific resource.
+message ResourceWatchCacheSize {
+ // APIGroup is the API group of the resource for which the watch cache size should be configured.
+ // An unset value is used to specify the legacy core API (e.g. for `secrets`).
+ // +optional
+ optional string apiGroup = 1;
+
+ // Resource is the name of the resource for which the watch cache size should be configured
+ // (in lowercase plural form, e.g. `secrets`).
+ optional string resource = 2;
+
+ // CacheSize specifies the watch cache size that should be configured for the specified resource.
+ optional int32 size = 3;
+}
+
+message SecretBinding {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // SecretRef is a reference to a secret object in the same or another namespace.
+ optional k8s.io.api.core.v1.SecretReference secretRef = 2;
+
+ // Quotas is a list of references to Quota objects in the same or another namespace.
+ // +optional
+ repeated k8s.io.api.core.v1.ObjectReference quotas = 3;
+}
+
+// SecretBindingList is a collection of SecretBindings.
+message SecretBindingList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of SecretBindings.
+ repeated SecretBinding items = 2;
+}
+
+// Seed represents an installation request for an external controller.
+message Seed {
+ // Standard object metadata.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec contains the specification of this installation.
+ optional SeedSpec spec = 2;
+
+ // Status contains the status of this installation.
+ optional SeedStatus status = 3;
+}
+
+// SeedBackup contains the object store configuration for backups for shoot (currently only etcd).
+message SeedBackup {
+ // Provider is a provider name.
+ optional string provider = 1;
+
+ // ProviderConfig is the configuration passed to BackupBucket resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // Region is a region name.
+ // +optional
+ optional string region = 3;
+
+ // SecretRef is a reference to a Secret object containing the cloud provider credentials for
+ // the object store where backups should be stored. It should have enough privileges to manipulate
+ // the objects as well as buckets.
+ optional k8s.io.api.core.v1.SecretReference secretRef = 4;
+}
+
+// SeedDNS contains DNS-relevant information about this seed cluster.
+message SeedDNS {
+ // IngressDomain is the domain of the Seed cluster pointing to the ingress controller endpoint. It will be used
+ // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable.
+ // This will be removed in the next API version and replaced by spec.ingress.domain.
+ // +optional
+ optional string ingressDomain = 1;
+
+ // Provider configures a DNSProvider
+ // +optional
+ optional SeedDNSProvider provider = 2;
+}
+
+// SeedDNSProvider configures a DNSProvider
+message SeedDNSProvider {
+ // Type describes the type of the dns-provider, for example `aws-route53`
+ optional string type = 1;
+
+ // SecretRef is a reference to a Secret object containing cloud provider credentials used for registering external domains.
+ optional k8s.io.api.core.v1.SecretReference secretRef = 2;
+
+ // Domains contains information about which domains shall be included/excluded for this provider.
+ // +optional
+ optional DNSIncludeExclude domains = 3;
+
+ // Zones contains information about which hosted zones shall be included/excluded for this provider.
+ // +optional
+ optional DNSIncludeExclude zones = 4;
+}
+
+// SeedList is a collection of Seeds.
+message SeedList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Seeds.
+ repeated Seed items = 2;
+}
+
+// SeedNetworks contains CIDRs for the pod, service and node networks of a Kubernetes cluster.
+message SeedNetworks {
+ // Nodes is the CIDR of the node network.
+ // +optional
+ optional string nodes = 1;
+
+ // Pods is the CIDR of the pod network.
+ optional string pods = 2;
+
+ // Services is the CIDR of the service network.
+ optional string services = 3;
+
+ // ShootDefaults contains the default networks CIDRs for shoots.
+ // +optional
+ optional ShootNetworks shootDefaults = 4;
+}
+
+// SeedProvider defines the provider type and region for this Seed cluster.
+message SeedProvider {
+ // Type is the name of the provider.
+ optional string type = 1;
+
+ // ProviderConfig is the configuration passed to Seed resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // Region is a name of a region.
+ optional string region = 3;
+}
+
+// SeedSelector contains constraints for selecting seed to be usable for shoots using a profile
+message SeedSelector {
+ // LabelSelector is optional and can be used to select seeds by their label settings
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1;
+
+ // Providers is optional and can be used to restrict seeds by their provider type. '*' can be used to enable seeds regardless of their provider type.
+ // +optional
+ repeated string providerTypes = 2;
+}
+
+// SeedSettingExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the
+// seed. When enabled then this is done via PodPriority and requires the Seed cluster to have Kubernetes version 1.11
+// or the PodPriority feature gate as well as the scheduling.k8s.io/v1alpha1 API group enabled.
+message SeedSettingExcessCapacityReservation {
+ // Enabled controls whether the excess capacity reservation should be enabled.
+ optional bool enabled = 1;
+}
+
+// SeedSettingLoadBalancerServices controls certain settings for services of type load balancer that are created in the
+// seed.
+message SeedSettingLoadBalancerServices {
+ // Annotations is a map of annotations that will be injected/merged into every load balancer service object.
+ // +optional
+ map<string, string> annotations = 1;
+}
+
+// SeedSettingScheduling controls settings for scheduling decisions for the seed.
+message SeedSettingScheduling {
+ // Visible controls whether the gardener-scheduler shall consider this seed when scheduling shoots. Invisible seeds
+ // are not considered by the scheduler.
+ optional bool visible = 1;
+}
+
+// SeedSettingShootDNS controls the shoot DNS settings for the seed.
+message SeedSettingShootDNS {
+ // Enabled controls whether the DNS for shoot clusters should be enabled. When disabled then all shoots using the
+ // seed won't get any DNS providers, DNS records, and no DNS extension controller is required to be installed here.
+ // This is useful for environments where DNS is not required.
+ optional bool enabled = 1;
+}
+
+// SeedSettingVerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the
+// seed.
+message SeedSettingVerticalPodAutoscaler {
+ // Enabled controls whether the VPA components shall be deployed into the garden namespace in the seed cluster. It
+ // is enabled by default because Gardener heavily relies on a VPA being deployed. You should only disable this if
+ // your seed cluster already has another, manually/custom managed VPA deployment.
+ optional bool enabled = 1;
+}
+
+// SeedSettings contains certain settings for this seed cluster.
+message SeedSettings {
+ // ExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the seed.
+ // +optional
+ optional SeedSettingExcessCapacityReservation excessCapacityReservation = 1;
+
+ // Scheduling controls settings for scheduling decisions for the seed.
+ // +optional
+ optional SeedSettingScheduling scheduling = 2;
+
+ // ShootDNS controls the shoot DNS settings for the seed.
+ // +optional
+ optional SeedSettingShootDNS shootDNS = 3;
+
+ // LoadBalancerServices controls certain settings for services of type load balancer that are created in the
+ // seed.
+ // +optional
+ optional SeedSettingLoadBalancerServices loadBalancerServices = 4;
+
+ // VerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the seed.
+ // +optional
+ optional SeedSettingVerticalPodAutoscaler verticalPodAutoscaler = 5;
+}
+
+// SeedSpec is the specification of a Seed.
+message SeedSpec {
+ // Backup holds the object store configuration for the backups of shoot (currently only etcd).
+ // If it is not specified, then there won't be any backups taken for shoots associated with this seed.
+ // If backup field is present in seed, then backups of the etcd from shoot control plane will be stored
+ // under the configured object store.
+ // +optional
+ optional SeedBackup backup = 1;
+
+ // BlockCIDRs is a list of network addresses that should be blocked for shoot control plane components running
+ // in the seed cluster.
+ // +optional
+ repeated string blockCIDRs = 2;
+
+ // DNS contains DNS-relevant information about this seed cluster.
+ optional SeedDNS dns = 3;
+
+ // Networks defines the pod, service and worker network of the Seed cluster.
+ optional SeedNetworks networks = 4;
+
+ // Provider defines the provider type and region for this Seed cluster.
+ optional SeedProvider provider = 5;
+
+ // SecretRef is a reference to a Secret object containing the Kubeconfig and the cloud provider credentials for
+ // the account the Seed cluster has been deployed to.
+ // +optional
+ optional k8s.io.api.core.v1.SecretReference secretRef = 6;
+
+ // Taints describes taints on the seed.
+ // +optional
+ repeated SeedTaint taints = 7;
+
+ // Volume contains settings for persistentvolumes created in the seed cluster.
+ // +optional
+ optional SeedVolume volume = 8;
+
+ // Settings contains certain settings for this seed cluster.
+ // +optional
+ optional SeedSettings settings = 9;
+
+ // Ingress configures Ingress specific settings of the Seed cluster.
+ // +optional
+ optional Ingress ingress = 10;
+}
+
+// SeedStatus is the status of a Seed.
+message SeedStatus {
+ // Conditions represents the latest available observations of a Seed's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ repeated Condition conditions = 1;
+
+ // Gardener holds information about the Gardener instance which last acted on the Seed.
+ // +optional
+ optional Gardener gardener = 2;
+
+ // KubernetesVersion is the Kubernetes version of the seed cluster.
+ // +optional
+ optional string kubernetesVersion = 3;
+
+ // ObservedGeneration is the most recent generation observed for this Seed. It corresponds to the
+ // Seed's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 4;
+
+ // ClusterIdentity is the identity of the Seed cluster
+ // +optional
+ optional string clusterIdentity = 5;
+}
+
+// SeedTaint describes a taint on a seed.
+message SeedTaint {
+ // Key is the taint key to be applied to a seed.
+ optional string key = 1;
+
+ // Value is the taint value corresponding to the taint key.
+ // +optional
+ optional string value = 2;
+}
+
+// SeedVolume contains settings for persistentvolumes created in the seed cluster.
+message SeedVolume {
+ // MinimumSize defines the minimum size that should be used for PVCs in the seed.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity minimumSize = 1;
+
+ // Providers is a list of storage class provisioner types for the seed.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ repeated SeedVolumeProvider providers = 2;
+}
+
+// SeedVolumeProvider is a storage class provisioner type.
+message SeedVolumeProvider {
+ // Purpose is the purpose of this provider.
+ optional string purpose = 1;
+
+ // Name is the name of the storage class provisioner type.
+ optional string name = 2;
+}
+
+// ServiceAccountConfig is the kube-apiserver configuration for service accounts.
+message ServiceAccountConfig {
+ // Issuer is the identifier of the service account token issuer. The issuer will assert this
+ // identifier in "iss" claim of issued tokens. This value is a string or URI.
+ // Defaults to URI of the API server.
+ // +optional
+ optional string issuer = 1;
+
+ // SigningKeySecret is a reference to a secret that contains an optional private key of the
+ // service account token issuer. The issuer will sign issued ID tokens with this private key.
+ // Only useful if service account tokens are also issued by another external system.
+ // +optional
+ optional k8s.io.api.core.v1.LocalObjectReference signingKeySecretName = 2;
+}
+
+message Shoot {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the Shoot cluster.
+ // +optional
+ optional ShootSpec spec = 2;
+
+ // Most recently observed status of the Shoot cluster.
+ // +optional
+ optional ShootStatus status = 3;
+}
+
+// ShootList is a list of Shoot objects.
+message ShootList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Shoots.
+ repeated Shoot items = 2;
+}
+
+// ShootMachineImage defines the name and the version of the shoot's machine image in any environment. Has to be
+// defined in the respective CloudProfile.
+message ShootMachineImage {
+ // Name is the name of the image.
+ optional string name = 1;
+
+ // ProviderConfig is the shoot's individual configuration passed to an extension resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // Version is the version of the shoot's image.
+ // If version is not provided, it will be defaulted to the latest version from the CloudProfile.
+ // +optional
+ optional string version = 3;
+}
+
+// ShootNetworks contains the default networks CIDRs for shoots.
+message ShootNetworks {
+ // Pods is the CIDR of the pod network.
+ // +optional
+ optional string pods = 1;
+
+ // Services is the CIDR of the service network.
+ // +optional
+ optional string services = 2;
+}
+
+// ShootSpec is the specification of a Shoot.
+message ShootSpec {
+ // Addons contains information about enabled/disabled addons and their configuration.
+ // +optional
+ optional Addons addons = 1;
+
+ // CloudProfileName is a name of a CloudProfile object.
+ optional string cloudProfileName = 2;
+
+ // DNS contains information about the DNS settings of the Shoot.
+ // +optional
+ optional DNS dns = 3;
+
+ // Extensions contain type and provider information for Shoot extensions.
+ // +optional
+ repeated Extension extensions = 4;
+
+ // Hibernation contains information whether the Shoot is suspended or not.
+ // +optional
+ optional Hibernation hibernation = 5;
+
+ // Kubernetes contains the version and configuration settings of the control plane components.
+ optional Kubernetes kubernetes = 6;
+
+ // Networking contains information about cluster networking such as CNI plugin type, CIDRs, etc.
+ optional Networking networking = 7;
+
+ // Maintenance contains information about the time window for maintenance operations and which
+ // operations should be performed.
+ // +optional
+ optional Maintenance maintenance = 8;
+
+ // Monitoring contains information about custom monitoring configurations for the shoot.
+ // +optional
+ optional Monitoring monitoring = 9;
+
+ // Provider contains all provider-specific and provider-relevant information.
+ optional Provider provider = 10;
+
+ // Purpose is the purpose class for this cluster.
+ // +optional
+ optional string purpose = 11;
+
+ // Region is a name of a region.
+ optional string region = 12;
+
+ // SecretBindingName is the name of a SecretBinding that has a reference to the provider secret.
+ // The credentials inside the provider secret will be used to create the shoot in the respective account.
+ optional string secretBindingName = 13;
+
+ // SeedName is the name of the seed cluster that runs the control plane of the Shoot.
+ // +optional
+ optional string seedName = 14;
+
+ // SeedSelector is an optional selector which must match a seed's labels for the shoot to be scheduled on that seed.
+ // +optional
+ optional SeedSelector seedSelector = 15;
+
+ // Resources holds a list of named resource references that can be referred to in extension configs by their names.
+ // +optional
+ repeated NamedResourceReference resources = 16;
+
+ // Tolerations contains the tolerations for taints on seed clusters.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ repeated Toleration tolerations = 17;
+}
+
+// ShootState contains a snapshot of the Shoot's state required to migrate the Shoot's control plane to a new Seed.
+message ShootState {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the ShootState.
+ // +optional
+ optional ShootStateSpec spec = 2;
+}
+
+// ShootStateList is a list of ShootState objects.
+message ShootStateList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of ShootStates.
+ repeated ShootState items = 2;
+}
+
+// ShootStateSpec is the specification of the ShootState.
+message ShootStateSpec {
+ // Gardener holds the data required to generate resources deployed by the gardenlet
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ repeated GardenerResourceData gardener = 1;
+
+ // Extensions holds the state of custom resources reconciled by extension controllers in the seed
+ // +optional
+ repeated ExtensionResourceState extensions = 2;
+
+ // Resources holds the data of resources referred to by extension controller states
+ // +optional
+ repeated ResourceData resources = 3;
+}
+
+// ShootStatus holds the most recently observed status of the Shoot cluster.
+message ShootStatus {
+ // Conditions represents the latest available observations of a Shoot's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated Condition conditions = 1;
+
+ // Constraints represents conditions of a Shoot's current state that constrain some operations on it.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated Condition constraints = 2;
+
+ // Gardener holds information about the Gardener which last acted on the Shoot.
+ optional Gardener gardener = 3;
+
+ // IsHibernated indicates whether the Shoot is currently hibernated.
+ optional bool hibernated = 4;
+
+ // LastOperation holds information about the last operation on the Shoot.
+ // +optional
+ optional LastOperation lastOperation = 5;
+
+ // LastError holds information about the last occurred error during an operation.
+ // +optional
+ optional LastError lastError = 6;
+
+ // LastErrors holds information about the last occurred error(s) during an operation.
+ // +optional
+ repeated LastError lastErrors = 7;
+
+ // ObservedGeneration is the most recent generation observed for this Shoot. It corresponds to the
+ // Shoot's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 8;
+
+ // RetryCycleStartTime is the start time of the last retry cycle (used to determine how often an operation
+ // must be retried until we give up).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time retryCycleStartTime = 9;
+
+ // Seed is the name of the seed cluster that runs the control plane of the Shoot. This value is only written
+ // after a successful create/reconcile operation. It will be used when control planes are moved between Seeds.
+ // +optional
+ optional string seed = 10;
+
+ // TechnicalID is the name that is used for creating the Seed namespace, the infrastructure resources, and
+ // basically everything that is related to this particular Shoot.
+ optional string technicalID = 11;
+
+ // UID is a unique identifier for the Shoot cluster to avoid portability between Kubernetes clusters.
+ // It is used to compute unique hashes.
+ optional string uid = 12;
+
+ // ClusterIdentity is the identity of the Shoot cluster
+ // +optional
+ optional string clusterIdentity = 13;
+}
+
+// Toleration is a toleration for a seed taint.
+message Toleration {
+ // Key is the toleration key to be applied to a project or shoot.
+ optional string key = 1;
+
+ // Value is the toleration value corresponding to the toleration key.
+ // +optional
+ optional string value = 2;
+}
+
+// VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler.
+message VerticalPodAutoscaler {
+ // Enabled specifies whether the Kubernetes VPA shall be enabled for the shoot cluster.
+ optional bool enabled = 1;
+
+ // EvictAfterOOMThreshold defines the threshold that will lead to pod eviction in case it OOMed in less than the given
+ // threshold since its start and if it has only one container (default: 10m0s).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration evictAfterOOMThreshold = 2;
+
+ // EvictionRateBurst defines the burst of pods that can be evicted (default: 1)
+ // +optional
+ optional int32 evictionRateBurst = 3;
+
+ // EvictionRateLimit defines the number of pods that can be evicted per second. A rate limit set to 0 or -1 will
+ // disable the rate limiter (default: -1).
+ // +optional
+ optional double evictionRateLimit = 4;
+
+ // EvictionTolerance defines the fraction of replica count that can be evicted for update in case more than one
+ // pod can be evicted (default: 0.5).
+ // +optional
+ optional double evictionTolerance = 5;
+
+ // RecommendationMarginFraction is the fraction of usage added as the safety margin to the recommended request
+ // (default: 0.15).
+ // +optional
+ optional double recommendationMarginFraction = 6;
+
+ // UpdaterInterval is the interval how often the updater should run (default: 1m0s).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration updaterInterval = 7;
+
+ // RecommenderInterval is the interval how often metrics should be fetched (default: 1m0s).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration recommenderInterval = 8;
+}
+
+// Volume contains information about the volume type and size.
+message Volume {
+ // Name of the volume to make it referencable.
+ // +optional
+ optional string name = 1;
+
+ // Type is the type of the volume.
+ // +optional
+ optional string type = 2;
+
+ // Size is the size of the volume.
+ optional string size = 3;
+
+ // Encrypted determines if the volume should be encrypted.
+ // +optional
+ optional bool encrypted = 4;
+}
+
+// VolumeType contains certain properties of a volume type.
+message VolumeType {
+ // Class is the class of the volume type.
+ optional string class = 1;
+
+ // Name is the name of the volume type.
+ optional string name = 2;
+
+ // Usable defines if the volume type can be used for shoot clusters.
+ // +optional
+ optional bool usable = 3;
+}
+
+// WatchCacheSizes contains configuration of the API server's watch cache sizes.
+message WatchCacheSizes {
+ // Default configures the default watch cache size of the kube-apiserver
+ // (flag `--default-watch-cache-size`, defaults to 100).
+ // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+ // +optional
+ optional int32 default = 1;
+
+ // Resources configures the watch cache size of the kube-apiserver per resource
+ // (flag `--watch-cache-sizes`).
+ // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+ // +optional
+ repeated ResourceWatchCacheSize resources = 2;
+}
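+
+// Illustrative note (assuming the standard kube-apiserver flag syntax): a WatchCacheSizes with
+// default=100 and a single ResourceWatchCacheSize {resource: "secrets", size: 500} would correspond
+// to the flags `--default-watch-cache-size=100 --watch-cache-sizes=secrets#500`.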
+
+// Worker is the base definition of a worker group.
+message Worker {
+ // Annotations is a map of key/value pairs for annotations for all the `Node` objects in this worker pool.
+ // +optional
+ map<string, string> annotations = 1;
+
+ // CABundle is a certificate bundle which will be installed onto every machine of this worker pool.
+ // +optional
+ optional string caBundle = 2;
+
+ // CRI contains configurations of CRI support of every machine in the worker pool
+ // +optional
+ optional CRI cri = 3;
+
+ // Kubernetes contains configuration for Kubernetes components related to this worker pool.
+ // +optional
+ optional WorkerKubernetes kubernetes = 4;
+
+ // Labels is a map of key/value pairs for labels for all the `Node` objects in this worker pool.
+ // +optional
+ map<string, string> labels = 5;
+
+ // Name is the name of the worker group.
+ optional string name = 6;
+
+ // Machine contains information about the machine type and image.
+ optional Machine machine = 7;
+
+ // Maximum is the maximum number of VMs to create.
+ optional int32 maximum = 8;
+
+ // Minimum is the minimum number of VMs to create.
+ optional int32 minimum = 9;
+
+ // MaxSurge is the maximum number of VMs that are created during an update.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 10;
+
+ // MaxUnavailable is the maximum number of VMs that can be unavailable during an update.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 11;
+
+ // ProviderConfig is the provider-specific configuration for this worker pool.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 12;
+
+ // Taints is a list of taints for all the `Node` objects in this worker pool.
+ // +optional
+ repeated k8s.io.api.core.v1.Taint taints = 13;
+
+ // Volume contains information about the volume type and size.
+ // +optional
+ optional Volume volume = 14;
+
+ // DataVolumes contains a list of additional worker volumes.
+ // +optional
+ repeated DataVolume dataVolumes = 15;
+
+ // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state.
+ // +optional
+ optional string kubeletDataVolumeName = 16;
+
+ // Zones is a list of availability zones that are used to evenly distribute this worker pool. Optional
+ // as not every provider may support availability zones.
+ // +optional
+ repeated string zones = 17;
+
+ // SystemComponents contains configuration for system components related to this worker pool
+ // +optional
+ optional WorkerSystemComponents systemComponents = 18;
+
+ // MachineControllerManagerSettings contains configurations for different worker-pools, e.g. MachineDrainTimeout, MachineHealthTimeout.
+ // +optional
+ optional MachineControllerManagerSettings machineControllerManager = 19;
+}
+
+// WorkerKubernetes contains configuration for Kubernetes components related to this worker pool.
+message WorkerKubernetes {
+ // Kubelet contains configuration settings for all kubelets of this worker pool.
+ // +optional
+ optional KubeletConfig kubelet = 1;
+}
+
+// WorkerSystemComponents contains configuration for system components related to this worker pool
+message WorkerSystemComponents {
+ // Allow determines whether the pool should be allowed to host system components or not (defaults to true)
+ optional bool allow = 1;
+}
+
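The maintenance time window fields above use the HHMMSS+ZONE format (e.g. "220000+0100"). As a minimal, illustrative sketch (not part of this patch; the layout constant and the program around it are made up for the example), such values can be parsed with Go's time package:

package main

import (
	"fmt"
	"time"
)

// maintenanceTimeLayout is a hypothetical constant matching the HHMMSS+ZONE format
// used by MaintenanceTimeWindow.Begin and MaintenanceTimeWindow.End, e.g. "220000+0100".
const maintenanceTimeLayout = "150405-0700"

func main() {
	begin, err := time.Parse(maintenanceTimeLayout, "220000+0100")
	if err != nil {
		panic(err)
	}
	end, err := time.Parse(maintenanceTimeLayout, "230000+0100")
	if err != nil {
		panic(err)
	}
	// Prints the parsed window, e.g. "maintenance window: 22:00 +0100 - 23:00 +0100".
	fmt.Printf("maintenance window: %s - %s\n", begin.Format("15:04 -0700"), end.Format("15:04 -0700"))
}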
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper/condition_builder.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper/condition_builder.go
new file mode 100644
index 0000000..515292e
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper/condition_builder.go
@@ -0,0 +1,155 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helper
+
+import (
+ "fmt"
+
+ gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ConditionBuilder builds a Condition.
+type ConditionBuilder interface {
+ WithOldCondition(old gardencorev1alpha1.Condition) ConditionBuilder
+ WithStatus(status gardencorev1alpha1.ConditionStatus) ConditionBuilder
+ WithReason(reason string) ConditionBuilder
+ WithMessage(message string) ConditionBuilder
+ WithCodes(codes ...gardencorev1alpha1.ErrorCode) ConditionBuilder
+ WithNowFunc(now func() metav1.Time) ConditionBuilder
+ Build() (new gardencorev1alpha1.Condition, updated bool)
+}
+
+// defaultConditionBuilder builds a Condition.
+type defaultConditionBuilder struct {
+ old gardencorev1alpha1.Condition
+ status gardencorev1alpha1.ConditionStatus
+ conditionType gardencorev1alpha1.ConditionType
+ reason string
+ message string
+ codes []gardencorev1alpha1.ErrorCode
+ nowFunc func() metav1.Time
+}
+
+// NewConditionBuilder returns a ConditionBuilder for a specific condition.
+func NewConditionBuilder(conditionType gardencorev1alpha1.ConditionType) (ConditionBuilder, error) {
+ if conditionType == "" {
+ return nil, fmt.Errorf("conditionType cannot be empty")
+ }
+
+ return &defaultConditionBuilder{
+ conditionType: conditionType,
+ nowFunc: metav1.Now,
+ }, nil
+}
+
+// WithOldCondition sets the old condition. It can be used to provide default values.
+// The old's condition type is overridden to the one specified in the builder.
+func (b *defaultConditionBuilder) WithOldCondition(old gardencorev1alpha1.Condition) ConditionBuilder {
+ old.Type = b.conditionType
+ b.old = old
+
+ return b
+}
+
+// WithStatus sets the status of the condition.
+func (b *defaultConditionBuilder) WithStatus(status gardencorev1alpha1.ConditionStatus) ConditionBuilder {
+ b.status = status
+ return b
+}
+
+// WithReason sets the reason of the condition.
+func (b *defaultConditionBuilder) WithReason(reason string) ConditionBuilder {
+ b.reason = reason
+ return b
+}
+
+// WithMessage sets the message of the condition.
+func (b *defaultConditionBuilder) WithMessage(message string) ConditionBuilder {
+ b.message = message
+ return b
+}
+
+// WithCodes sets the codes of the condition.
+func (b *defaultConditionBuilder) WithCodes(codes ...gardencorev1alpha1.ErrorCode) ConditionBuilder {
+ b.codes = codes
+ return b
+}
+
+// WithNowFunc sets the function used for getting the current time.
+// Should only be used for tests.
+func (b *defaultConditionBuilder) WithNowFunc(now func() metav1.Time) ConditionBuilder {
+ b.nowFunc = now
+ return b
+}
+
+// Build creates the condition and returns whether the resulting condition differs from the old condition.
+// If an old condition is provided:
+// - Any change to the status sets the `LastTransitionTime` to the current time.
+// - Any update to the message or the reason sets the `LastUpdateTime` to the current time.
+func (b *defaultConditionBuilder) Build() (new gardencorev1alpha1.Condition, updated bool) {
+ var (
+ now = b.nowFunc()
+ emptyTime = metav1.Time{}
+ )
+
+ new = *b.old.DeepCopy()
+
+ if new.LastTransitionTime == emptyTime {
+ new.LastTransitionTime = now
+ }
+
+ if new.LastUpdateTime == emptyTime {
+ new.LastUpdateTime = now
+ }
+
+ new.Type = b.conditionType
+
+ if b.status != "" {
+ new.Status = b.status
+ } else if b.status == "" && b.old.Status == "" {
+ new.Status = gardencorev1alpha1.ConditionUnknown
+ }
+
+ if b.reason != "" {
+ new.Reason = b.reason
+ } else if b.reason == "" && b.old.Reason == "" {
+ new.Reason = "ConditionInitialized"
+ }
+
+ if b.message != "" {
+ new.Message = b.message
+ } else if b.message == "" && b.old.Message == "" {
+ new.Message = "The condition has been initialized but its semantic check has not been performed yet."
+ }
+
+ if b.codes != nil {
+ new.Codes = b.codes
+ } else if b.codes == nil && b.old.Codes == nil {
+ new.Codes = nil
+ }
+
+ if new.Status != b.old.Status {
+ new.LastTransitionTime = now
+ }
+
+ if new.Reason != b.old.Reason || new.Message != b.old.Message {
+ new.LastUpdateTime = now
+ }
+
+ return new, !apiequality.Semantic.DeepEqual(new, b.old)
+}
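For orientation, a minimal usage sketch of the condition builder defined above (illustrative only; the condition type, reason, and message strings are invented for the example):

package main

import (
	"fmt"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
	"github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper"
)

func main() {
	builder, err := helper.NewConditionBuilder(gardencorev1alpha1.ConditionType("ExtensionReady"))
	if err != nil {
		panic(err)
	}

	// Start from an existing (here: zero-valued) condition and update its status;
	// Build reports whether the resulting condition differs from the old one.
	condition, updated := builder.
		WithOldCondition(gardencorev1alpha1.Condition{}).
		WithStatus(gardencorev1alpha1.ConditionTrue).
		WithReason("ReconciliationSucceeded").
		WithMessage("The extension has been reconciled successfully.").
		Build()

	fmt.Println(condition.Status, updated)
}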
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper/helper.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper/helper.go
new file mode 100644
index 0000000..6a9109d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper/helper.go
@@ -0,0 +1,781 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helper
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ v1alpha1constants "github.com/gardener/gardener/pkg/apis/core/v1alpha1/constants"
+ "github.com/gardener/gardener/pkg/logger"
+ versionutils "github.com/gardener/gardener/pkg/utils/version"
+
+ "github.com/Masterminds/semver"
+ "github.com/pkg/errors"
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// Now determines the current metav1.Time.
+var Now = metav1.Now
+
+// InitCondition initializes a new Condition with an Unknown status.
+func InitCondition(conditionType gardencorev1alpha1.ConditionType) gardencorev1alpha1.Condition {
+ return gardencorev1alpha1.Condition{
+ Type: conditionType,
+ Status: gardencorev1alpha1.ConditionUnknown,
+ Reason: "ConditionInitialized",
+ Message: "The condition has been initialized but its semantic check has not been performed yet.",
+ LastTransitionTime: Now(),
+ LastUpdateTime: Now(),
+ }
+}
+
+// NewConditions initializes the provided conditions based on an existing list. If a condition type does not exist
+// in the list yet, it will be set to default values.
+func NewConditions(conditions []gardencorev1alpha1.Condition, conditionTypes ...gardencorev1alpha1.ConditionType) []*gardencorev1alpha1.Condition {
+ newConditions := []*gardencorev1alpha1.Condition{}
+
+ // We retrieve the current conditions in order to update them appropriately.
+ for _, conditionType := range conditionTypes {
+ if c := GetCondition(conditions, conditionType); c != nil {
+ newConditions = append(newConditions, c)
+ continue
+ }
+ initializedCondition := InitCondition(conditionType)
+ newConditions = append(newConditions, &initializedCondition)
+ }
+
+ return newConditions
+}
+
+// GetCondition returns the condition with the given <conditionType> out of the list of <conditions>.
+// In case the required type could not be found, it returns nil.
+func GetCondition(conditions []gardencorev1alpha1.Condition, conditionType gardencorev1alpha1.ConditionType) *gardencorev1alpha1.Condition {
+ for _, condition := range conditions {
+ if condition.Type == conditionType {
+ c := condition
+ return &c
+ }
+ }
+ return nil
+}
+
+// GetOrInitCondition tries to retrieve the condition with the given condition type from the given conditions.
+// If the condition could not be found, it returns an initialized condition of the given type.
+func GetOrInitCondition(conditions []gardencorev1alpha1.Condition, conditionType gardencorev1alpha1.ConditionType) gardencorev1alpha1.Condition {
+ if condition := GetCondition(conditions, conditionType); condition != nil {
+ return *condition
+ }
+ return InitCondition(conditionType)
+}
+
+// UpdatedCondition updates the properties of one specific condition.
+func UpdatedCondition(condition gardencorev1alpha1.Condition, status gardencorev1alpha1.ConditionStatus, reason, message string, codes ...gardencorev1alpha1.ErrorCode) gardencorev1alpha1.Condition {
+ newCondition := gardencorev1alpha1.Condition{
+ Type: condition.Type,
+ Status: status,
+ Reason: reason,
+ Message: message,
+ LastTransitionTime: condition.LastTransitionTime,
+ LastUpdateTime: Now(),
+ Codes: codes,
+ }
+
+ if condition.Status != status {
+ newCondition.LastTransitionTime = Now()
+ }
+ return newCondition
+}
+
+// UpdatedConditionUnknownError updates the condition to 'Unknown' status and the message of the given error.
+func UpdatedConditionUnknownError(condition gardencorev1alpha1.Condition, err error, codes ...gardencorev1alpha1.ErrorCode) gardencorev1alpha1.Condition {
+ return UpdatedConditionUnknownErrorMessage(condition, err.Error(), codes...)
+}
+
+// UpdatedConditionUnknownErrorMessage updates the condition with 'Unknown' status and the given message.
+func UpdatedConditionUnknownErrorMessage(condition gardencorev1alpha1.Condition, message string, codes ...gardencorev1alpha1.ErrorCode) gardencorev1alpha1.Condition {
+ return UpdatedCondition(condition, gardencorev1alpha1.ConditionUnknown, gardencorev1alpha1.ConditionCheckError, message, codes...)
+}
+
+// MergeConditions merges the given <oldConditions> with the <newConditions>. Existing conditions are superseded by
+// the <newConditions> (depending on the condition type).
+func MergeConditions(oldConditions []gardencorev1alpha1.Condition, newConditions ...gardencorev1alpha1.Condition) []gardencorev1alpha1.Condition {
+ var (
+ out = make([]gardencorev1alpha1.Condition, 0, len(oldConditions)+len(newConditions))
+ typeToIndex = make(map[gardencorev1alpha1.ConditionType]int, len(oldConditions))
+ )
+
+ for i, condition := range oldConditions {
+ out = append(out, condition)
+ typeToIndex[condition.Type] = i
+ }
+
+ for _, condition := range newConditions {
+ if index, ok := typeToIndex[condition.Type]; ok {
+ out[index] = condition
+ continue
+ }
+ out = append(out, condition)
+ }
+
+ return out
+}
+
+// ConditionsNeedUpdate returns true if the <existingConditions> must be updated based on the <newConditions>.
+func ConditionsNeedUpdate(existingConditions, newConditions []gardencorev1alpha1.Condition) bool {
+ return existingConditions == nil || !apiequality.Semantic.DeepEqual(newConditions, existingConditions)
+}
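+
+// Illustrative usage of the condition helpers above (sketch only; `shoot` and the condition
+// type/reason/message are placeholders, not upstream identifiers):
+//
+//   condition := GetOrInitCondition(shoot.Status.Conditions, gardencorev1alpha1.ConditionType("APIServerAvailable"))
+//   condition = UpdatedCondition(condition, gardencorev1alpha1.ConditionTrue, "HealthzSucceeded", "API server responded successfully.")
+//   shoot.Status.Conditions = MergeConditions(shoot.Status.Conditions, condition)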
+
+// IsResourceSupported returns true if a given combination of kind/type is part of a controller resources list.
+func IsResourceSupported(resources []gardencorev1alpha1.ControllerResource, resourceKind, resourceType string) bool {
+ for _, resource := range resources {
+ if resource.Kind == resourceKind && strings.EqualFold(resource.Type, resourceType) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// IsControllerInstallationSuccessful returns true if a ControllerInstallation has been marked as "successfully"
+// installed.
+func IsControllerInstallationSuccessful(controllerInstallation gardencorev1alpha1.ControllerInstallation) bool {
+ var (
+ installed bool
+ healthy bool
+ )
+
+ for _, condition := range controllerInstallation.Status.Conditions {
+ if condition.Type == gardencorev1alpha1.ControllerInstallationInstalled && condition.Status == gardencorev1alpha1.ConditionTrue {
+ installed = true
+ }
+ if condition.Type == gardencorev1alpha1.ControllerInstallationHealthy && condition.Status == gardencorev1alpha1.ConditionTrue {
+ healthy = true
+ }
+ }
+
+ return installed && healthy
+}
+
+// ComputeOperationType checks the <lastOperation> and determines whether it is Create, Delete, Reconcile, Migrate or Restore operation
+func ComputeOperationType(meta metav1.ObjectMeta, lastOperation *gardencorev1alpha1.LastOperation) gardencorev1alpha1.LastOperationType {
+ switch {
+ case meta.Annotations[v1alpha1constants.GardenerOperation] == v1alpha1constants.GardenerOperationMigrate:
+ return gardencorev1alpha1.LastOperationTypeMigrate
+ case meta.Annotations[v1alpha1constants.GardenerOperation] == v1alpha1constants.GardenerOperationRestore:
+ return gardencorev1alpha1.LastOperationTypeRestore
+ case meta.DeletionTimestamp != nil:
+ return gardencorev1alpha1.LastOperationTypeDelete
+ case lastOperation == nil:
+ return gardencorev1alpha1.LastOperationTypeCreate
+ case (lastOperation.Type == gardencorev1alpha1.LastOperationTypeCreate && lastOperation.State != gardencorev1alpha1.LastOperationStateSucceeded):
+ return gardencorev1alpha1.LastOperationTypeCreate
+ case (lastOperation.Type == gardencorev1alpha1.LastOperationTypeMigrate && lastOperation.State != gardencorev1alpha1.LastOperationStateSucceeded):
+ return gardencorev1alpha1.LastOperationTypeMigrate
+ case (lastOperation.Type == gardencorev1alpha1.LastOperationTypeRestore && lastOperation.State != gardencorev1alpha1.LastOperationStateSucceeded):
+ return gardencorev1alpha1.LastOperationTypeRestore
+ }
+ return gardencorev1alpha1.LastOperationTypeReconcile
+}
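+
+// For example: an object with a non-nil DeletionTimestamp yields LastOperationTypeDelete, an object
+// without any lastOperation yields LastOperationTypeCreate, and an object whose last Create operation
+// already succeeded yields LastOperationTypeReconcile.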
+
+// TaintsHave returns true if the given key is part of the taints list.
+func TaintsHave(taints []gardencorev1alpha1.SeedTaint, key string) bool {
+ for _, taint := range taints {
+ if taint.Key == key {
+ return true
+ }
+ }
+ return false
+}
+
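+// ShootedSeed holds the configuration of a seed cluster that is backed by a shoot ("shooted seed"),
+// as parsed from the corresponding shoot annotation by parseShootedSeed.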
+type ShootedSeed struct {
+ DisableDNS *bool
+ DisableCapacityReservation *bool
+ Protected *bool
+ Visible *bool
+ MinimumVolumeSize *string
+ APIServer *ShootedSeedAPIServer
+ BlockCIDRs []string
+ ShootDefaults *gardencorev1alpha1.ShootNetworks
+ Backup *gardencorev1alpha1.SeedBackup
+ NoGardenlet bool
+ UseServiceAccountBootstrapping bool
+ WithSecretRef bool
+}
+
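+// ShootedSeedAPIServer contains the API server settings of a shooted seed.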
+type ShootedSeedAPIServer struct {
+ Replicas *int32
+ Autoscaler *ShootedSeedAPIServerAutoscaler
+}
+
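+// ShootedSeedAPIServerAutoscaler contains the API server autoscaler settings of a shooted seed.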
+type ShootedSeedAPIServerAutoscaler struct {
+ MinReplicas *int32
+ MaxReplicas int32
+}
+
+func parseInt32(s string) (int32, error) {
+ i64, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(i64), nil
+}
+
+func parseShootedSeed(annotation string) (*ShootedSeed, error) {
+ var (
+ flags = make(map[string]struct{})
+ settings = make(map[string]string)
+
+ trueVar = true
+ falseVar = false
+
+ shootedSeed ShootedSeed
+ )
+
+ for _, fragment := range strings.Split(annotation, ",") {
+ parts := strings.SplitN(fragment, "=", 2)
+ if len(parts) == 1 {
+ flags[fragment] = struct{}{}
+ continue
+ }
+
+ settings[parts[0]] = parts[1]
+ }
+
+ if _, ok := flags["true"]; !ok {
+ return nil, nil
+ }
+
+ apiServer, err := parseShootedSeedAPIServer(settings)
+ if err != nil {
+ return nil, err
+ }
+ shootedSeed.APIServer = apiServer
+
+ blockCIDRs, err := parseShootedSeedBlockCIDRs(settings)
+ if err != nil {
+ return nil, err
+ }
+ shootedSeed.BlockCIDRs = blockCIDRs
+
+ shootDefaults, err := parseShootedSeedShootDefaults(settings)
+ if err != nil {
+ return nil, err
+ }
+ shootedSeed.ShootDefaults = shootDefaults
+
+ backup, err := parseShootedSeedBackup(settings)
+ if err != nil {
+ return nil, err
+ }
+ shootedSeed.Backup = backup
+
+ if size, ok := settings["minimumVolumeSize"]; ok {
+ shootedSeed.MinimumVolumeSize = &size
+ }
+
+ if _, ok := flags["disable-dns"]; ok {
+ shootedSeed.DisableDNS = &trueVar
+ }
+ if _, ok := flags["disable-capacity-reservation"]; ok {
+ shootedSeed.DisableCapacityReservation = &trueVar
+ }
+ if _, ok := flags["no-gardenlet"]; ok {
+ shootedSeed.NoGardenlet = true
+ }
+ if _, ok := flags["use-serviceaccount-bootstrapping"]; ok {
+ shootedSeed.UseServiceAccountBootstrapping = true
+ }
+ if _, ok := flags["with-secret-ref"]; ok {
+ shootedSeed.WithSecretRef = true
+ }
+
+ if _, ok := flags["protected"]; ok {
+ shootedSeed.Protected = &trueVar
+ }
+ if _, ok := flags["unprotected"]; ok {
+ shootedSeed.Protected = &falseVar
+ }
+ if _, ok := flags["visible"]; ok {
+ shootedSeed.Visible = &trueVar
+ }
+ if _, ok := flags["invisible"]; ok {
+ shootedSeed.Visible = &falseVar
+ }
+
+ return &shootedSeed, nil
+}
+
+func parseShootedSeedBlockCIDRs(settings map[string]string) ([]string, error) {
+ cidrs, ok := settings["blockCIDRs"]
+ if !ok {
+ return nil, nil
+ }
+
+ return strings.Split(cidrs, ";"), nil
+}
+
+func parseShootedSeedShootDefaults(settings map[string]string) (*gardencorev1alpha1.ShootNetworks, error) {
+ var (
+ podCIDR, ok1 = settings["shootDefaults.pods"]
+ serviceCIDR, ok2 = settings["shootDefaults.services"]
+ )
+
+ if !ok1 && !ok2 {
+ return nil, nil
+ }
+
+ shootNetworks := &gardencorev1alpha1.ShootNetworks{}
+
+ if ok1 {
+ shootNetworks.Pods = &podCIDR
+ }
+
+ if ok2 {
+ shootNetworks.Services = &serviceCIDR
+ }
+
+ return shootNetworks, nil
+}
+
+func parseShootedSeedBackup(settings map[string]string) (*gardencorev1alpha1.SeedBackup, error) {
+ var (
+ provider, ok1 = settings["backup.provider"]
+ region, ok2 = settings["backup.region"]
+ secretRefName, ok3 = settings["backup.secretRef.name"]
+ secretRefNamespace, ok4 = settings["backup.secretRef.namespace"]
+ )
+
+ if ok1 && provider == "none" {
+ return nil, nil
+ }
+
+ backup := &gardencorev1alpha1.SeedBackup{}
+
+ if ok1 {
+ backup.Provider = provider
+ }
+ if ok2 {
+		backup.Region = &region
+ }
+ if ok3 {
+ backup.SecretRef.Name = secretRefName
+ }
+ if ok4 {
+ backup.SecretRef.Namespace = secretRefNamespace
+ }
+
+ return backup, nil
+}
+
+func parseShootedSeedAPIServer(settings map[string]string) (*ShootedSeedAPIServer, error) {
+ apiServerAutoscaler, err := parseShootedSeedAPIServerAutoscaler(settings)
+ if err != nil {
+ return nil, err
+ }
+
+ replicasString, ok := settings["apiServer.replicas"]
+ if !ok && apiServerAutoscaler == nil {
+ return nil, nil
+ }
+
+ var apiServer ShootedSeedAPIServer
+
+ apiServer.Autoscaler = apiServerAutoscaler
+
+ if ok {
+ replicas, err := parseInt32(replicasString)
+ if err != nil {
+ return nil, err
+ }
+
+ apiServer.Replicas = &replicas
+ }
+
+ return &apiServer, nil
+}
+
+func parseShootedSeedAPIServerAutoscaler(settings map[string]string) (*ShootedSeedAPIServerAutoscaler, error) {
+ minReplicasString, ok1 := settings["apiServer.autoscaler.minReplicas"]
+ maxReplicasString, ok2 := settings["apiServer.autoscaler.maxReplicas"]
+ if !ok1 && !ok2 {
+ return nil, nil
+ }
+ if !ok2 {
+		return nil, fmt.Errorf("apiServer.autoscaler.maxReplicas has to be specified for shooted seed API server autoscaler")
+ }
+
+ var apiServerAutoscaler ShootedSeedAPIServerAutoscaler
+
+ if ok1 {
+ minReplicas, err := parseInt32(minReplicasString)
+ if err != nil {
+ return nil, err
+ }
+ apiServerAutoscaler.MinReplicas = &minReplicas
+ }
+
+ maxReplicas, err := parseInt32(maxReplicasString)
+ if err != nil {
+ return nil, err
+ }
+ apiServerAutoscaler.MaxReplicas = maxReplicas
+
+ return &apiServerAutoscaler, nil
+}
+
+func validateShootedSeed(shootedSeed *ShootedSeed, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if shootedSeed.APIServer != nil {
+ allErrs = validateShootedSeedAPIServer(shootedSeed.APIServer, fldPath.Child("apiServer"))
+ }
+
+ return allErrs
+}
+
+func validateShootedSeedAPIServer(apiServer *ShootedSeedAPIServer, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if apiServer.Replicas != nil && *apiServer.Replicas < 1 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("replicas"), *apiServer.Replicas, "must be greater than 0"))
+ }
+ if apiServer.Autoscaler != nil {
+ allErrs = append(allErrs, validateShootedSeedAPIServerAutoscaler(apiServer.Autoscaler, fldPath.Child("autoscaler"))...)
+ }
+
+ return allErrs
+}
+
+func validateShootedSeedAPIServerAutoscaler(autoscaler *ShootedSeedAPIServerAutoscaler, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if autoscaler.MinReplicas != nil && *autoscaler.MinReplicas < 1 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("minReplicas"), *autoscaler.MinReplicas, "must be greater than 0"))
+ }
+ if autoscaler.MaxReplicas < 1 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than 0"))
+ }
+ if autoscaler.MinReplicas != nil && autoscaler.MaxReplicas < *autoscaler.MinReplicas {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than or equal to `minReplicas`"))
+ }
+
+ return allErrs
+}
+
+func setDefaults_ShootedSeed(shootedSeed *ShootedSeed) {
+ if shootedSeed.APIServer == nil {
+ shootedSeed.APIServer = &ShootedSeedAPIServer{}
+ }
+ setDefaults_ShootedSeedAPIServer(shootedSeed.APIServer)
+}
+
+func setDefaults_ShootedSeedAPIServer(apiServer *ShootedSeedAPIServer) {
+ if apiServer.Replicas == nil {
+ three := int32(3)
+ apiServer.Replicas = &three
+ }
+ if apiServer.Autoscaler == nil {
+ apiServer.Autoscaler = &ShootedSeedAPIServerAutoscaler{
+ MaxReplicas: 3,
+ }
+ }
+ setDefaults_ShootedSeedAPIServerAutoscaler(apiServer.Autoscaler)
+}
+
+func minInt32(a int32, b int32) int32 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func setDefaults_ShootedSeedAPIServerAutoscaler(autoscaler *ShootedSeedAPIServerAutoscaler) {
+ if autoscaler.MinReplicas == nil {
+ minReplicas := minInt32(3, autoscaler.MaxReplicas)
+ autoscaler.MinReplicas = &minReplicas
+ }
+}
+
+// ReadShootedSeed determines whether the Shoot has been marked to be registered automatically as a Seed cluster.
+func ReadShootedSeed(shoot *gardencorev1alpha1.Shoot) (*ShootedSeed, error) {
+ if shoot.Namespace != v1alpha1constants.GardenNamespace || shoot.Annotations == nil {
+ return nil, nil
+ }
+
+ val, ok := shoot.Annotations[v1alpha1constants.AnnotationShootUseAsSeed]
+ if !ok {
+ return nil, nil
+ }
+
+ shootedSeed, err := parseShootedSeed(val)
+ if err != nil {
+ return nil, err
+ }
+
+ if shootedSeed == nil {
+ return nil, nil
+ }
+
+ setDefaults_ShootedSeed(shootedSeed)
+
+ if errs := validateShootedSeed(shootedSeed, nil); len(errs) > 0 {
+ return nil, errs.ToAggregate()
+ }
+
+ return shootedSeed, nil
+}
+
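For orientation, the annotation value consumed by ReadShootedSeed is a comma-separated mix of bare flags (such as "protected" or "invisible") and key=value settings, and the literal flag "true" must be present for parsing to yield a result. A minimal sketch, assuming it sits in this helper package (e.g. in a _test.go file) so the unexported parser is reachable; the annotation value below is purely illustrative:

```go
package helper

import "fmt"

// Sketch only: shows how a hypothetical use-as-seed annotation value is split
// into bare flags and key=value settings by parseShootedSeed.
func demoParseShootedSeed() {
	seed, err := parseShootedSeed("true,protected,invisible,apiServer.autoscaler.maxReplicas=5,minimumVolumeSize=20Gi")
	if err != nil {
		panic(err)
	}
	// Expected: Protected=true, Visible=false, MaxReplicas=5, MinimumVolumeSize=20Gi.
	fmt.Println(*seed.Protected, *seed.Visible, seed.APIServer.Autoscaler.MaxReplicas, *seed.MinimumVolumeSize)
}
```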
+// HibernationIsEnabled checks if the given shoot's desired state is hibernated.
+func HibernationIsEnabled(shoot *gardencorev1alpha1.Shoot) bool {
+ return shoot.Spec.Hibernation != nil && shoot.Spec.Hibernation.Enabled != nil && *shoot.Spec.Hibernation.Enabled
+}
+
+// ShootWantsClusterAutoscaler checks if the given Shoot needs a cluster autoscaler.
+// This is determined by checking whether one of the Shoot workers has a different
+// Maximum than Minimum.
+func ShootWantsClusterAutoscaler(shoot *gardencorev1alpha1.Shoot) (bool, error) {
+ for _, worker := range shoot.Spec.Provider.Workers {
+ if worker.Maximum > worker.Minimum {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// ShootIgnoresAlerts checks if the alerts for the annotated shoot cluster should be ignored.
+func ShootIgnoresAlerts(shoot *gardencorev1alpha1.Shoot) bool {
+ ignore := false
+ if value, ok := shoot.Annotations[v1alpha1constants.AnnotationShootIgnoreAlerts]; ok {
+ ignore, _ = strconv.ParseBool(value)
+ }
+ return ignore
+}
+
+// ShootWantsBasicAuthentication returns true if basic authentication is not configured or
+// if it is set explicitly to 'true'.
+func ShootWantsBasicAuthentication(shoot *gardencorev1alpha1.Shoot) bool {
+ kubeAPIServerConfig := shoot.Spec.Kubernetes.KubeAPIServer
+ if kubeAPIServerConfig == nil {
+ return true
+ }
+ if kubeAPIServerConfig.EnableBasicAuthentication == nil {
+ return true
+ }
+ return *kubeAPIServerConfig.EnableBasicAuthentication
+}
+
+// ShootUsesUnmanagedDNS returns true if the shoot's DNS section is marked as 'unmanaged'.
+func ShootUsesUnmanagedDNS(shoot *gardencorev1alpha1.Shoot) bool {
+ return shoot.Spec.DNS != nil && len(shoot.Spec.DNS.Providers) > 0 && shoot.Spec.DNS.Providers[0].Type != nil && *shoot.Spec.DNS.Providers[0].Type == "unmanaged"
+}
+
+// SystemComponentsAllowed checks if the given worker allows system components to be scheduled onto it
+func SystemComponentsAllowed(worker *gardencorev1alpha1.Worker) bool {
+ return worker.SystemComponents == nil || worker.SystemComponents.Allow
+}
+
+// GetMachineImagesFor returns a list of all machine images for a given shoot.
+func GetMachineImagesFor(shoot *gardencorev1alpha1.Shoot) []*gardencorev1alpha1.ShootMachineImage {
+ var workerMachineImages []*gardencorev1alpha1.ShootMachineImage
+ for _, worker := range shoot.Spec.Provider.Workers {
+ if worker.Machine.Image != nil {
+ workerMachineImages = append(workerMachineImages, worker.Machine.Image)
+ }
+ }
+ return workerMachineImages
+}
+
+// DetermineMachineImageForName finds the cloud-specific machine image with the given name in the cloud profile.
+// In case it does not find a machine image with the given name, it returns false. Otherwise, true and the
+// cloud-specific machine image will be returned.
+func DetermineMachineImageForName(cloudProfile *gardencorev1alpha1.CloudProfile, name string) (bool, gardencorev1alpha1.MachineImage, error) {
+ for _, image := range cloudProfile.Spec.MachineImages {
+ if strings.EqualFold(image.Name, name) {
+ return true, image, nil
+ }
+ }
+ return false, gardencorev1alpha1.MachineImage{}, nil
+}
+
+// ShootMachineImageVersionExists checks if the shoot machine image (name, version) exists in the machine image constraint. If so, it returns true and the index of the matching version in the versions slice.
+func ShootMachineImageVersionExists(constraint gardencorev1alpha1.MachineImage, image gardencorev1alpha1.ShootMachineImage) (bool, int) {
+ if constraint.Name != image.Name {
+ return false, 0
+ }
+
+ for index, v := range constraint.Versions {
+ if image.Version != nil && v.Version == *image.Version {
+ return true, index
+ }
+ }
+
+ return false, 0
+}
+
+// DetermineLatestMachineImageVersion determines the latest MachineImageVersion from a MachineImage
+func DetermineLatestMachineImageVersion(image gardencorev1alpha1.MachineImage) (*semver.Version, gardencorev1alpha1.MachineImageVersion, error) {
+ var (
+ latestSemVerVersion *semver.Version
+ latestMachineImageVersion gardencorev1alpha1.MachineImageVersion
+ )
+
+ for _, imageVersion := range image.Versions {
+ v, err := semver.NewVersion(imageVersion.Version)
+ if err != nil {
+ return nil, gardencorev1alpha1.MachineImageVersion{}, fmt.Errorf("error while parsing machine image version '%s' of machine image '%s': version not valid: %s", imageVersion.Version, image.Name, err.Error())
+ }
+ if latestSemVerVersion == nil || v.GreaterThan(latestSemVerVersion) {
+ latestSemVerVersion = v
+ latestMachineImageVersion = imageVersion
+ }
+ }
+ return latestSemVerVersion, latestMachineImageVersion, nil
+}
+
+// GetShootMachineImageFromLatestMachineImageVersion determines the latest version in a machine image and returns that as a ShootMachineImage
+func GetShootMachineImageFromLatestMachineImageVersion(image gardencorev1alpha1.MachineImage) (*semver.Version, gardencorev1alpha1.ShootMachineImage, error) {
+ latestSemVerVersion, latestImage, err := DetermineLatestMachineImageVersion(image)
+ if err != nil {
+ return nil, gardencorev1alpha1.ShootMachineImage{}, err
+ }
+ return latestSemVerVersion, gardencorev1alpha1.ShootMachineImage{Name: image.Name, Version: &latestImage.Version}, nil
+}
+
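To illustrate the semver-based selection above, a small sketch with a made-up machine image (name and versions are hypothetical); note that numeric semver comparison picks 184.0.0 where plain string ordering would not:

```go
package helper

import (
	"fmt"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

// Sketch only: resolves the newest version of a hypothetical machine image.
func demoLatestMachineImage() {
	image := gardencorev1alpha1.MachineImage{
		Name: "some-os",
		Versions: []gardencorev1alpha1.MachineImageVersion{
			{ExpirableVersion: gardencorev1alpha1.ExpirableVersion{Version: "27.1.0"}},
			{ExpirableVersion: gardencorev1alpha1.ExpirableVersion{Version: "184.0.0"}},
		},
	}

	latest, shootImage, err := GetShootMachineImageFromLatestMachineImageVersion(image)
	if err != nil {
		panic(err)
	}
	// Expected: 184.0.0 some-os 184.0.0
	fmt.Println(latest.String(), shootImage.Name, *shootImage.Version)
}
```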
+// UpdateMachineImages updates the machine images in place.
+func UpdateMachineImages(workers []gardencorev1alpha1.Worker, machineImages []*gardencorev1alpha1.ShootMachineImage) {
+ for _, machineImage := range machineImages {
+ for idx, worker := range workers {
+ if worker.Machine.Image != nil && machineImage.Name == worker.Machine.Image.Name {
+ logger.Logger.Infof("Updating worker images of worker '%s' from version %s to version %s", worker.Name, *worker.Machine.Image.Version, *machineImage.Version)
+ workers[idx].Machine.Image = machineImage
+ }
+ }
+ }
+}
+
+// KubernetesVersionExistsInCloudProfile checks if the given Kubernetes version exists in the CloudProfile
+func KubernetesVersionExistsInCloudProfile(cloudProfile *gardencorev1alpha1.CloudProfile, currentVersion string) (bool, gardencorev1alpha1.ExpirableVersion, error) {
+ for _, version := range cloudProfile.Spec.Kubernetes.Versions {
+ ok, err := versionutils.CompareVersions(version.Version, "=", currentVersion)
+ if err != nil {
+ return false, gardencorev1alpha1.ExpirableVersion{}, err
+ }
+ if ok {
+ return true, version, nil
+ }
+ }
+ return false, gardencorev1alpha1.ExpirableVersion{}, nil
+}
+
+// DetermineLatestKubernetesPatchVersion finds the latest Kubernetes patch version in the given cloud profile compared
+// to the given current version. In case it does not find a newer patch version, it returns false. Otherwise,
+// true and the found version will be returned.
+func DetermineLatestKubernetesPatchVersion(cloudProfile *gardencorev1alpha1.CloudProfile, currentVersion string) (bool, string, error) {
+ ok, newerVersions, _, err := determineNextKubernetesVersions(cloudProfile, currentVersion, "~")
+ if err != nil || !ok {
+ return ok, "", err
+ }
+ sort.Strings(newerVersions)
+ return true, newerVersions[len(newerVersions)-1], nil
+}
+
+// DetermineNextKubernetesMinorVersion finds the next available Kubernetes minor version in the given cloud profile
+// compared to the given current version. In case it does not find a newer minor version, it returns false. Otherwise,
+// true and the found version will be returned.
+func DetermineNextKubernetesMinorVersion(cloudProfile *gardencorev1alpha1.CloudProfile, currentVersion string) (bool, string, error) {
+ ok, newerVersions, _, err := determineNextKubernetesVersions(cloudProfile, currentVersion, "^")
+ if err != nil || !ok {
+ return ok, "", err
+ }
+ sort.Strings(newerVersions)
+ return true, newerVersions[0], nil
+}
+
+// determineNextKubernetesVersions finds newer Kubernetes versions in the given cloud profile compared to the given
+// current version using the given operator. The operator has to be a github.com/Masterminds/semver range comparison
+// symbol. In case it does not find a newer version, it returns false. Otherwise, true and the found versions will be
+// returned.
+func determineNextKubernetesVersions(cloudProfile *gardencorev1alpha1.CloudProfile, currentVersion, operator string) (bool, []string, []gardencorev1alpha1.ExpirableVersion, error) {
+ var (
+ newerVersions = []gardencorev1alpha1.ExpirableVersion{}
+ newerVersionsString = []string{}
+ )
+
+ for _, version := range cloudProfile.Spec.Kubernetes.Versions {
+ ok, err := versionutils.CompareVersions(version.Version, operator, currentVersion)
+ if err != nil {
+ return false, []string{}, []gardencorev1alpha1.ExpirableVersion{}, err
+ }
+ if version.Version != currentVersion && ok {
+ newerVersions = append(newerVersions, version)
+ newerVersionsString = append(newerVersionsString, version.Version)
+ }
+ }
+
+ if len(newerVersions) == 0 {
+ return false, []string{}, []gardencorev1alpha1.ExpirableVersion{}, nil
+ }
+
+ return true, newerVersionsString, newerVersions, nil
+}
+
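As a sketch of how the "~" operator constrains patch-level updates (the CloudProfile contents below are hypothetical), DetermineLatestKubernetesPatchVersion should ignore 1.18.1 because it lies outside the current minor version:

```go
package helper

import (
	"fmt"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

// Sketch only: with 1.17.2, 1.17.5 and 1.18.1 in the profile, only 1.17.5
// satisfies "~1.17.2", so it should be reported as the latest patch version.
func demoLatestPatchVersion() {
	profile := &gardencorev1alpha1.CloudProfile{
		Spec: gardencorev1alpha1.CloudProfileSpec{
			Kubernetes: gardencorev1alpha1.KubernetesSettings{
				Versions: []gardencorev1alpha1.ExpirableVersion{
					{Version: "1.17.2"},
					{Version: "1.17.5"},
					{Version: "1.18.1"},
				},
			},
		},
	}

	ok, version, err := DetermineLatestKubernetesPatchVersion(profile, "1.17.2")
	if err != nil {
		panic(err)
	}
	// Expected: true 1.17.5
	fmt.Println(ok, version)
}
```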
+// SetMachineImageVersionsToMachineImage sets imageVersions to the matching imageName in the machineImages.
+func SetMachineImageVersionsToMachineImage(machineImages []gardencorev1alpha1.MachineImage, imageName string, imageVersions []gardencorev1alpha1.MachineImageVersion) ([]gardencorev1alpha1.MachineImage, error) {
+ for index, image := range machineImages {
+ if strings.EqualFold(image.Name, imageName) {
+ machineImages[index].Versions = imageVersions
+ return machineImages, nil
+ }
+ }
+ return nil, fmt.Errorf("machine image with name '%s' could not be found", imageName)
+}
+
+// GetDefaultMachineImageFromCloudProfile gets the first MachineImage from the CloudProfile
+func GetDefaultMachineImageFromCloudProfile(profile gardencorev1alpha1.CloudProfile) *gardencorev1alpha1.MachineImage {
+ if len(profile.Spec.MachineImages) == 0 {
+ return nil
+ }
+ return &profile.Spec.MachineImages[0]
+}
+
+// WrapWithLastError is a wrapper function for gardencorev1alpha1.LastError.
+func WrapWithLastError(err error, lastError *gardencorev1alpha1.LastError) error {
+ if err == nil || lastError == nil {
+ return err
+ }
+ return errors.Wrapf(err, "last error: %s", lastError.Description)
+}
+
+// IsAPIServerExposureManaged returns true, if the Object is managed by Gardener for API server exposure.
+// This indicates to extensions that they should not mutate the object.
+// Gardener marks the kube-apiserver Service and Deployment as managed by it when it uses SNI to expose them.
+func IsAPIServerExposureManaged(obj metav1.Object) bool {
+ if obj == nil {
+ return false
+ }
+
+ if v, found := obj.GetLabels()[v1alpha1constants.LabelAPIServerExposure]; found &&
+ v == v1alpha1constants.LabelAPIServerExposureGardenerManaged {
+ return true
+ }
+
+ return false
+}
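A short usage sketch for IsAPIServerExposureManaged; the Service below is hypothetical, and the label key/value come from the v1alpha1constants package this file already uses (the import path is assumed):

```go
package helper

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1alpha1constants "github.com/gardener/gardener/pkg/apis/core/v1alpha1/constants"
)

// Sketch only: a kube-apiserver Service marked by Gardener as managed for API
// server exposure; extensions should leave such objects untouched.
func demoIsAPIServerExposureManaged() bool {
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "kube-apiserver",
			Labels: map[string]string{
				v1alpha1constants.LabelAPIServerExposure: v1alpha1constants.LabelAPIServerExposureGardenerManaged,
			},
		},
	}
	return IsAPIServerExposureManaged(svc) // expected: true
}
```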
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper/shootstate_list.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper/shootstate_list.go
new file mode 100644
index 0000000..9951942
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper/shootstate_list.go
@@ -0,0 +1,142 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helper
+
+import (
+ gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
+)
+
+// ExtensionResourceStateList is a list of ExtensionResourceStates
+type ExtensionResourceStateList []gardencorev1alpha1.ExtensionResourceState
+
+// Get retrieves an ExtensionResourceState for the given kind, name and purpose from a list of ExtensionResourceStates.
+// If no matching ExtensionResourceState can be found, nil is returned.
+func (e *ExtensionResourceStateList) Get(kind string, name, purpose *string) *gardencorev1alpha1.ExtensionResourceState {
+ for _, obj := range *e {
+ if matchesExtensionResourceState(&obj, kind, name, purpose) {
+ return &obj
+ }
+ }
+ return nil
+}
+
+// Delete removes an ExtensionResourceState from the list by kind, name and purpose
+func (e *ExtensionResourceStateList) Delete(kind string, name, purpose *string) {
+ for i := len(*e) - 1; i >= 0; i-- {
+ if matchesExtensionResourceState(&(*e)[i], kind, name, purpose) {
+ *e = append((*e)[:i], (*e)[i+1:]...)
+ return
+ }
+ }
+}
+
+// Upsert either inserts or updates an already existing ExtensionResourceState with kind, name and purpose in the list
+func (e *ExtensionResourceStateList) Upsert(extensionResourceState *gardencorev1alpha1.ExtensionResourceState) {
+ for i, obj := range *e {
+ if matchesExtensionResourceState(&obj, extensionResourceState.Kind, extensionResourceState.Name, extensionResourceState.Purpose) {
+ (*e)[i].State = extensionResourceState.State
+ (*e)[i].Resources = extensionResourceState.Resources
+ return
+ }
+ }
+ *e = append(*e, *extensionResourceState)
+}
+
+func matchesExtensionResourceState(extensionResourceState *gardencorev1alpha1.ExtensionResourceState, kind string, name, purpose *string) bool {
+ if extensionResourceState.Kind == kind && apiequality.Semantic.DeepEqual(extensionResourceState.Name, name) && apiequality.Semantic.DeepEqual(extensionResourceState.Purpose, purpose) {
+ return true
+ }
+ return false
+}
+
+// GardenerResourceDataList is a list of GardenerResourceData
+type GardenerResourceDataList []gardencorev1alpha1.GardenerResourceData
+
+// Delete deletes an item from the list
+func (g *GardenerResourceDataList) Delete(name string) {
+ for i := len(*g) - 1; i >= 0; i-- {
+ if (*g)[i].Name == name {
+ *g = append((*g)[:i], (*g)[i+1:]...)
+ return
+ }
+ }
+}
+
+// Get returns the item from the list
+func (g *GardenerResourceDataList) Get(name string) *gardencorev1alpha1.GardenerResourceData {
+ for _, resourceDataEntry := range *g {
+ if resourceDataEntry.Name == name {
+ return &resourceDataEntry
+ }
+ }
+ return nil
+}
+
+// Upsert inserts a new element or updates an existing one
+func (g *GardenerResourceDataList) Upsert(data *gardencorev1alpha1.GardenerResourceData) {
+ for i, obj := range *g {
+ if obj.Name == data.Name {
+ (*g)[i].Type = data.Type
+ (*g)[i].Data = data.Data
+ return
+ }
+ }
+ *g = append(*g, *data)
+}
+
+// DeepCopy makes a deep copy of a GardenerResourceDataList
+func (g GardenerResourceDataList) DeepCopy() GardenerResourceDataList {
+ res := GardenerResourceDataList{}
+ for _, obj := range g {
+ res = append(res, *obj.DeepCopy())
+ }
+ return res
+}
+
+// ResourceDataList is a list of ResourceData
+type ResourceDataList []gardencorev1alpha1.ResourceData
+
+// Delete deletes an item from the list
+func (r *ResourceDataList) Delete(ref *autoscalingv1.CrossVersionObjectReference) {
+ for i := len(*r) - 1; i >= 0; i-- {
+ if apiequality.Semantic.DeepEqual((*r)[i].CrossVersionObjectReference, *ref) {
+ *r = append((*r)[:i], (*r)[i+1:]...)
+ return
+ }
+ }
+}
+
+// Get returns the item from the list
+func (r *ResourceDataList) Get(ref *autoscalingv1.CrossVersionObjectReference) *gardencorev1alpha1.ResourceData {
+ for _, obj := range *r {
+ if apiequality.Semantic.DeepEqual(obj.CrossVersionObjectReference, *ref) {
+ return &obj
+ }
+ }
+ return nil
+}
+
+// Upsert inserts a new element or updates an existing one
+func (r *ResourceDataList) Upsert(data *gardencorev1alpha1.ResourceData) {
+ for i, obj := range *r {
+ if apiequality.Semantic.DeepEqual(obj.CrossVersionObjectReference, data.CrossVersionObjectReference) {
+ (*r)[i].Data = data.Data
+ return
+ }
+ }
+ *r = append(*r, *data)
+}
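A brief usage sketch for the GardenerResourceDataList helpers above (entry names and types are made up): Upsert replaces an entry with the same name in place rather than appending a duplicate.

```go
package helper

import (
	"fmt"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

// Sketch only: exercises Upsert, Get and Delete on a GardenerResourceDataList.
func demoGardenerResourceDataList() {
	list := GardenerResourceDataList{
		{Name: "ca", Type: "secret"},
	}

	// Upsert with an existing name updates the entry instead of appending.
	list.Upsert(&gardencorev1alpha1.GardenerResourceData{Name: "ca", Type: "basic-auth"})
	fmt.Println(len(list), list.Get("ca").Type) // expected: 1 basic-auth

	list.Delete("ca")
	fmt.Println(len(list)) // expected: 0
}
```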
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/register.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/register.go
new file mode 100644
index 0000000..0e9c27c
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/register.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the name of the core API group.
+const GroupName = "core.gardener.cloud"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind.
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource.
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is a new Scheme Builder which registers our API.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs)
+ localSchemeBuilder = &SchemeBuilder
+ // AddToScheme is a reference to the Scheme Builder's AddToScheme function.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &BackupBucket{},
+ &BackupBucketList{},
+ &BackupEntry{},
+ &BackupEntryList{},
+ &CloudProfile{},
+ &CloudProfileList{},
+ &ControllerRegistration{},
+ &ControllerRegistrationList{},
+ &ControllerInstallation{},
+ &ControllerInstallationList{},
+ &Plant{},
+ &PlantList{},
+ &Project{},
+ &ProjectList{},
+ &Quota{},
+ &QuotaList{},
+ &SecretBinding{},
+ &SecretBindingList{},
+ &Seed{},
+ &SeedList{},
+ &ShootState{},
+ &ShootStateList{},
+ &Shoot{},
+ &ShootList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
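As a usage sketch, a scheme can be populated with this group version via AddToScheme and then handed to clients or codecs (standard apimachinery helpers assumed):

```go
package main

import (
	"fmt"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// Sketch only: after AddToScheme, the scheme recognizes the registered
// core.gardener.cloud/v1alpha1 kinds.
func main() {
	scheme := runtime.NewScheme()
	utilruntime.Must(gardencorev1alpha1.AddToScheme(scheme))

	fmt.Println(scheme.Recognizes(gardencorev1alpha1.SchemeGroupVersion.WithKind("Shoot"))) // expected: true
}
```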
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types.go
new file mode 100644
index 0000000..6bd416f
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+const (
+ // GardenerSeedLeaseNamespace is the namespace in which Gardenlet will report Seeds'
+ // status using Lease resources for each Seed
+ GardenerSeedLeaseNamespace = "gardener-system-seed-lease"
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupbucket.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupbucket.go
new file mode 100644
index 0000000..7ec6112
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupbucket.go
@@ -0,0 +1,91 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupBucket holds details about backup bucket
+type BackupBucket struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the Backup Bucket.
+ Spec BackupBucketSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // Most recently observed status of the Backup Bucket.
+ Status BackupBucketStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupBucketList is a list of BackupBucket objects.
+type BackupBucketList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of BackupBucket.
+ Items []BackupBucket `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// BackupBucketSpec is the specification of a Backup Bucket.
+type BackupBucketSpec struct {
+	// Provider holds the details of the cloud provider of the object store.
+ Provider BackupBucketProvider `json:"provider" protobuf:"bytes,1,opt,name=provider"`
+ // ProviderConfig is the configuration passed to BackupBucket resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // SecretRef is a reference to a secret that contains the credentials to access object store.
+ SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
+ // Seed holds the name of the seed allocated to BackupBucket for running controller.
+ // +optional
+ Seed *string `json:"seed,omitempty" protobuf:"bytes,4,opt,name=seed"`
+}
+
+// BackupBucketStatus holds the most recently observed status of the Backup Bucket.
+type BackupBucketStatus struct {
+ // ProviderStatus is the configuration passed to BackupBucket resource.
+ // +optional
+ ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty" protobuf:"bytes,1,opt,name=providerStatus"`
+ // LastOperation holds information about the last operation on the BackupBucket.
+ // +optional
+ LastOperation *LastOperation `json:"lastOperation,omitempty" protobuf:"bytes,2,opt,name=lastOperation"`
+ // LastError holds information about the last occurred error during an operation.
+ // +optional
+ LastError *LastError `json:"lastError,omitempty" protobuf:"bytes,3,opt,name=lastError"`
+ // ObservedGeneration is the most recent generation observed for this BackupBucket. It corresponds to the
+ // BackupBucket's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,4,opt,name=observedGeneration"`
+	// GeneratedSecretRef is a reference to the secret generated by the backup bucket, which
+	// will have object store specific credentials.
+ // +optional
+ GeneratedSecretRef *corev1.SecretReference `json:"generatedSecretRef,omitempty" protobuf:"bytes,5,opt,name=generatedSecretRef"`
+}
+
+// BackupBucketProvider holds the details of cloud provider of the object store.
+type BackupBucketProvider struct {
+ // Type is the type of provider.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // Region is the region of the bucket.
+ Region string `json:"region" protobuf:"bytes,2,opt,name=region"`
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupentry.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupentry.go
new file mode 100644
index 0000000..3d8efe8
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupentry.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ // BackupEntryForceDeletion is a constant for an annotation on a BackupEntry indicating that it should be force deleted.
+ BackupEntryForceDeletion = "backupentry.core.gardener.cloud/force-deletion"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupEntry holds details about shoot backup.
+type BackupEntry struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec contains the specification of the Backup Entry.
+ // +optional
+ Spec BackupEntrySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Status contains the most recently observed status of the Backup Entry.
+ // +optional
+ Status BackupEntryStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupEntryList is a list of BackupEntry objects.
+type BackupEntryList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of BackupEntry.
+ Items []BackupEntry `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// BackupEntrySpec is the specification of a Backup Entry.
+type BackupEntrySpec struct {
+ // BucketName is the name of backup bucket for this Backup Entry.
+ BucketName string `json:"bucketName" protobuf:"bytes,1,opt,name=bucketName"`
+ // Seed holds the name of the seed allocated to BackupEntry for running controller.
+ // +optional
+ Seed *string `json:"seed,omitempty" protobuf:"bytes,2,opt,name=seed"`
+}
+
+// BackupEntryStatus holds the most recently observed status of the Backup Entry.
+type BackupEntryStatus struct {
+ // LastOperation holds information about the last operation on the BackupEntry.
+ // +optional
+ LastOperation *LastOperation `json:"lastOperation,omitempty" protobuf:"bytes,1,opt,name=lastOperation"`
+ // LastError holds information about the last occurred error during an operation.
+ // +optional
+ LastError *LastError `json:"lastError,omitempty" protobuf:"bytes,2,opt,name=lastError"`
+ // ObservedGeneration is the most recent generation observed for this BackupEntry. It corresponds to the
+ // BackupEntry's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_cloudprofile.go
new file mode 100644
index 0000000..2e0b09a
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_cloudprofile.go
@@ -0,0 +1,226 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CloudProfile represents certain properties about a provider environment.
+type CloudProfile struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec defines the provider environment properties.
+ // +optional
+ Spec CloudProfileSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CloudProfileList is a collection of CloudProfiles.
+type CloudProfileList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of CloudProfiles.
+ Items []CloudProfile `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// CloudProfileSpec is the specification of a CloudProfile.
+// It must contain exactly one of its defined keys.
+type CloudProfileSpec struct {
+ // CABundle is a certificate bundle which will be installed onto every host machine of shoot cluster targeting this profile.
+ // +optional
+ CABundle *string `json:"caBundle,omitempty" protobuf:"bytes,1,opt,name=caBundle"`
+ // Kubernetes contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification.
+ Kubernetes KubernetesSettings `json:"kubernetes" protobuf:"bytes,2,opt,name=kubernetes"`
+ // MachineImages contains constraints regarding allowed values for machine images in the Shoot specification.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ MachineImages []MachineImage `json:"machineImages" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,3,rep,name=machineImages"`
+ // MachineTypes contains constraints regarding allowed values for machine types in the 'workers' block in the Shoot specification.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ MachineTypes []MachineType `json:"machineTypes" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,4,rep,name=machineTypes"`
+ // ProviderConfig contains provider-specific configuration for the profile.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,5,opt,name=providerConfig"`
+ // Regions contains constraints regarding allowed values for regions and zones.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Regions []Region `json:"regions" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=regions"`
+ // SeedSelector contains an optional list of labels on `Seed` resources that marks those seeds whose shoots may use this provider profile.
+ // An empty list means that all seeds of the same provider type are supported.
+ // This is useful for environments that are of the same type (like openstack) but may have different "instances"/landscapes.
+ // Optionally a list of possible providers can be added to enable cross-provider scheduling. By default, the provider
+ // type of the seed must match the shoot's provider.
+ // +optional
+ SeedSelector *SeedSelector `json:"seedSelector,omitempty" protobuf:"bytes,7,opt,name=seedSelector"`
+ // Type is the name of the provider.
+ Type string `json:"type" protobuf:"bytes,8,opt,name=type"`
+ // VolumeTypes contains constraints regarding allowed values for volume types in the 'workers' block in the Shoot specification.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ VolumeTypes []VolumeType `json:"volumeTypes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,rep,name=volumeTypes"`
+}
+
+// SeedSelector contains constraints for selecting seeds that are usable for shoots using a given profile.
+type SeedSelector struct {
+ // LabelSelector is optional and can be used to select seeds by their label settings
+ // +optional
+ *metav1.LabelSelector `json:",inline,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
+	// Providers is optional and can be used to restrict seeds by their provider type. '*' can be used to enable seeds regardless of their provider type.
+ // +optional
+ ProviderTypes []string `json:"providerTypes,omitempty" protobuf:"bytes,2,rep,name=providerTypes"`
+}
+
+// KubernetesSettings contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification.
+type KubernetesSettings struct {
+ // Versions is the list of allowed Kubernetes versions with optional expiration dates for Shoot clusters.
+ // +patchMergeKey=version
+ // +patchStrategy=merge
+ // +optional
+ Versions []ExpirableVersion `json:"versions,omitempty" patchStrategy:"merge" patchMergeKey:"version" protobuf:"bytes,1,rep,name=versions"`
+}
+
+// MachineImage defines the name and multiple versions of the machine image in any environment.
+type MachineImage struct {
+ // Name is the name of the image.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Versions contains versions, expiration dates and container runtimes of the machine image
+ // +patchMergeKey=version
+ // +patchStrategy=merge
+ Versions []MachineImageVersion `json:"versions" patchStrategy:"merge" patchMergeKey:"version" protobuf:"bytes,2,rep,name=versions"`
+}
+
+// MachineImageVersion is an expirable version with a list of supported container runtimes and interfaces.
+type MachineImageVersion struct {
+ ExpirableVersion `json:",inline" protobuf:"bytes,1,opt,name=expirableVersion"`
+	// CRI is the list of container runtimes and interfaces supported by this version.
+ // +optional
+ CRI []CRI `json:"cri,omitempty" protobuf:"bytes,2,rep,name=cri"`
+}
+
+// ExpirableVersion contains a version and an expiration date.
+type ExpirableVersion struct {
+ // Version is the version identifier.
+ Version string `json:"version" protobuf:"bytes,1,opt,name=version"`
+ // ExpirationDate defines the time at which this version expires.
+ // +optional
+ ExpirationDate *metav1.Time `json:"expirationDate,omitempty" protobuf:"bytes,2,opt,name=expirationDate"`
+ // Classification defines the state of a version (preview, supported, deprecated)
+ // +optional
+ Classification *VersionClassification `json:"classification,omitempty" protobuf:"bytes,3,opt,name=classification,casttype=VersionClassification"`
+}
+
+// MachineType contains certain properties of a machine type.
+type MachineType struct {
+ // CPU is the number of CPUs for this machine type.
+ CPU resource.Quantity `json:"cpu" protobuf:"bytes,1,opt,name=cpu"`
+ // GPU is the number of GPUs for this machine type.
+ GPU resource.Quantity `json:"gpu" protobuf:"bytes,2,opt,name=gpu"`
+ // Memory is the amount of memory for this machine type.
+ Memory resource.Quantity `json:"memory" protobuf:"bytes,3,opt,name=memory"`
+ // Name is the name of the machine type.
+ Name string `json:"name" protobuf:"bytes,4,opt,name=name"`
+ // Storage is the amount of storage associated with the root volume of this machine type.
+ // +optional
+ Storage *MachineTypeStorage `json:"storage,omitempty" protobuf:"bytes,5,opt,name=storage"`
+ // Usable defines if the machine type can be used for shoot clusters.
+ // +optional
+ Usable *bool `json:"usable,omitempty" protobuf:"varint,6,opt,name=usable"`
+}
+
+// MachineTypeStorage is the amount of storage associated with the root volume of this machine type.
+type MachineTypeStorage struct {
+ // Class is the class of the storage type.
+ Class string `json:"class" protobuf:"bytes,1,opt,name=class"`
+ // StorageSize is the storage size.
+ StorageSize resource.Quantity `json:"size" protobuf:"bytes,2,opt,name=size"`
+ // Type is the type of the storage.
+ Type string `json:"type" protobuf:"bytes,3,opt,name=type"`
+}
+
+// Region contains certain properties of a region.
+type Region struct {
+ // Name is a region name.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Zones is a list of availability zones in this region.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ Zones []AvailabilityZone `json:"zones,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=zones"`
+ // Labels is an optional set of key-value pairs that contain certain administrator-controlled labels for this region.
+ // It can be used by Gardener administrators/operators to provide additional information about a region, e.g. wrt
+ // quality, reliability, access restrictions, etc.
+ // +optional
+ Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,3,rep,name=labels"`
+}
+
+// AvailabilityZone is an availability zone.
+type AvailabilityZone struct {
+	// Name is an availability zone name.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// UnavailableMachineTypes is a list of machine type names that are not available in this zone.
+ // +optional
+ UnavailableMachineTypes []string `json:"unavailableMachineTypes,omitempty" protobuf:"bytes,2,rep,name=unavailableMachineTypes"`
+	// UnavailableVolumeTypes is a list of volume type names that are not available in this zone.
+ // +optional
+ UnavailableVolumeTypes []string `json:"unavailableVolumeTypes,omitempty" protobuf:"bytes,3,rep,name=unavailableVolumeTypes"`
+}
+
+// VolumeType contains certain properties of a volume type.
+type VolumeType struct {
+ // Class is the class of the volume type.
+ Class string `json:"class" protobuf:"bytes,1,opt,name=class"`
+ // Name is the name of the volume type.
+ Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+ // Usable defines if the volume type can be used for shoot clusters.
+ // +optional
+ Usable *bool `json:"usable,omitempty" protobuf:"varint,3,opt,name=usable"`
+}
+
+const (
+ // VolumeClassStandard is a constant for the standard volume class.
+ VolumeClassStandard string = "standard"
+ // VolumeClassPremium is a constant for the premium volume class.
+ VolumeClassPremium string = "premium"
+)
+
+// VersionClassification is the logical state of a version according to https://github.com/gardener/gardener/blob/master/docs/operations/versioning.md
+type VersionClassification string
+
+const (
+ // ClassificationPreview indicates that a version has recently been added and not promoted to "Supported" yet.
+ // ClassificationPreview versions will not be considered for automatic Kubernetes and Machine Image patch version updates.
+ ClassificationPreview VersionClassification = "preview"
+ // ClassificationSupported indicates that a patch version is the recommended version for a shoot.
+ // Using VersionMaintenance (see: https://github.com/gardener/gardener/docs/operation/versioning.md) there is one supported version per maintained minor version.
+ // Supported versions are eligible for the automated Kubernetes and Machine image patch version update for shoot clusters in Gardener.
+ ClassificationSupported VersionClassification = "supported"
+ // ClassificationDeprecated indicates that a patch version should not be used anymore, should be updated to a new version
+ // and will eventually expire.
+ ClassificationDeprecated VersionClassification = "deprecated"
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_common.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_common.go
new file mode 100644
index 0000000..17bd41c
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_common.go
@@ -0,0 +1,141 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// ErrorCode is a string alias.
+type ErrorCode string
+
+const (
+ // ErrorInfraUnauthorized indicates that the last error occurred due to invalid infrastructure credentials.
+ ErrorInfraUnauthorized ErrorCode = "ERR_INFRA_UNAUTHORIZED"
+ // ErrorInfraInsufficientPrivileges indicates that the last error occurred due to insufficient infrastructure privileges.
+ ErrorInfraInsufficientPrivileges ErrorCode = "ERR_INFRA_INSUFFICIENT_PRIVILEGES"
+ // ErrorInfraQuotaExceeded indicates that the last error occurred due to infrastructure quota limits.
+ ErrorInfraQuotaExceeded ErrorCode = "ERR_INFRA_QUOTA_EXCEEDED"
+ // ErrorInfraDependencies indicates that the last error occurred due to dependent objects on the infrastructure level.
+ ErrorInfraDependencies ErrorCode = "ERR_INFRA_DEPENDENCIES"
+ // ErrorInfraResourcesDepleted indicates that the last error occurred due to depleted resource in the infrastructure.
+ ErrorInfraResourcesDepleted ErrorCode = "ERR_INFRA_RESOURCES_DEPLETED"
+ // ErrorCleanupClusterResources indicates that the last error occurred due to resources in the cluster that are stuck in deletion.
+ ErrorCleanupClusterResources ErrorCode = "ERR_CLEANUP_CLUSTER_RESOURCES"
+ // ErrorConfigurationProblem indicates that the last error occurred due to a configuration problem.
+ ErrorConfigurationProblem ErrorCode = "ERR_CONFIGURATION_PROBLEM"
+)
+
+// LastError indicates the last occurred error for an operation on a resource.
+type LastError struct {
+ // A human readable message indicating details about the last error.
+ Description string `json:"description" protobuf:"bytes,1,opt,name=description"`
+ // ID of the task which caused this last error
+ // +optional
+ TaskID *string `json:"taskID,omitempty" protobuf:"bytes,2,opt,name=taskID"`
+ // Well-defined error codes of the last error(s).
+ // +optional
+ Codes []ErrorCode `json:"codes,omitempty" protobuf:"bytes,3,rep,name=codes,casttype=ErrorCode"`
+ // Last time the error was reported
+ // +optional
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"`
+}
+
+// LastOperationType is a string alias.
+type LastOperationType string
+
+const (
+ // LastOperationTypeCreate indicates a 'create' operation.
+ LastOperationTypeCreate LastOperationType = "Create"
+ // LastOperationTypeReconcile indicates a 'reconcile' operation.
+ LastOperationTypeReconcile LastOperationType = "Reconcile"
+ // LastOperationTypeDelete indicates a 'delete' operation.
+ LastOperationTypeDelete LastOperationType = "Delete"
+ // LastOperationTypeMigrate indicates a 'migrate' operation.
+ LastOperationTypeMigrate LastOperationType = "Migrate"
+ // LastOperationTypeRestore indicates a 'restore' operation.
+ LastOperationTypeRestore LastOperationType = "Restore"
+)
+
+// LastOperationState is a string alias.
+type LastOperationState string
+
+const (
+ // LastOperationStateProcessing indicates that an operation is ongoing.
+ LastOperationStateProcessing LastOperationState = "Processing"
+ // LastOperationStateSucceeded indicates that an operation has completed successfully.
+ LastOperationStateSucceeded LastOperationState = "Succeeded"
+ // LastOperationStateError indicates that an operation is completed with errors and will be retried.
+ LastOperationStateError LastOperationState = "Error"
+ // LastOperationStateFailed indicates that an operation is completed with errors and won't be retried.
+ LastOperationStateFailed LastOperationState = "Failed"
+ // LastOperationStatePending indicates that an operation cannot be done now, but will be tried in future.
+ LastOperationStatePending LastOperationState = "Pending"
+ // LastOperationStateAborted indicates that an operation has been aborted.
+ LastOperationStateAborted LastOperationState = "Aborted"
+)
+
+// LastOperation indicates the type and the state of the last operation, along with a description
+// message and a progress indicator.
+type LastOperation struct {
+ // A human readable message indicating details about the last operation.
+ Description string `json:"description" protobuf:"bytes,1,opt,name=description"`
+ // Last time the operation state transitioned from one to another.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime" protobuf:"bytes,2,opt,name=lastUpdateTime"`
+ // The progress in percentage (0-100) of the last operation.
+ Progress int32 `json:"progress" protobuf:"varint,3,opt,name=progress"`
+ // Status of the last operation, one of Aborted, Processing, Succeeded, Error, Failed.
+ State LastOperationState `json:"state" protobuf:"bytes,4,opt,name=state,casttype=LastOperationState"`
+ // Type of the last operation, one of Create, Reconcile, Delete.
+ Type LastOperationType `json:"type" protobuf:"bytes,5,opt,name=type,casttype=LastOperationType"`
+}
+
+// Gardener holds the information about the Gardener version that operated a resource.
+type Gardener struct {
+ // ID is the Docker container id of the Gardener which last acted on a resource.
+ ID string `json:"id" protobuf:"bytes,1,opt,name=id"`
+ // Name is the hostname (pod name) of the Gardener which last acted on a resource.
+ Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+ // Version is the version of the Gardener which last acted on a resource.
+ Version string `json:"version" protobuf:"bytes,3,opt,name=version"`
+}
+
+const (
+ // GardenerName is the value in a Garden resource's `.metadata.finalizers[]` array on which the Gardener will react
+ // when performing a delete request on a resource.
+ GardenerName = "gardener"
+ // ExternalGardenerName is the value in a Kubernetes core resources `.metadata.finalizers[]` array on which the
+ // Gardener will react when performing a delete request on a resource.
+ ExternalGardenerName = "gardener.cloud/gardener"
+)
+
+const (
+	// EventReconciling indicates that a Reconcile operation started.
+	EventReconciling = "Reconciling"
+	// EventReconciled indicates that a Reconcile operation was successful.
+	EventReconciled = "Reconciled"
+	// EventReconcileError indicates that a Reconcile operation failed.
+	EventReconcileError = "ReconcileError"
+	// EventDeleting indicates that a Delete operation started.
+	EventDeleting = "Deleting"
+	// EventDeleted indicates that a Delete operation was successful.
+	EventDeleted = "Deleted"
+	// EventDeleteError indicates that a Delete operation failed.
+ EventDeleteError = "DeleteError"
+ // EventPrepareMigration indicates that a Prepare Migration operation started.
+ EventPrepareMigration = "PrepareMigration"
+ // EventMigrationPrepared indicates that Migration preparation was successful.
+ EventMigrationPrepared = "MigrationPrepared"
+ // EventMigrationPreparationFailed indicates that Migration preparation failed.
+ EventMigrationPreparationFailed = "MigrationPreparationFailed"
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerinstallation.go
new file mode 100644
index 0000000..7dd4ed9
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerinstallation.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerInstallation represents an installation request for an external controller.
+type ControllerInstallation struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec contains the specification of this installation.
+ Spec ControllerInstallationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Status contains the status of this installation.
+ Status ControllerInstallationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerInstallationList is a collection of ControllerInstallations.
+type ControllerInstallationList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of ControllerInstallations.
+ Items []ControllerInstallation `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ControllerInstallationSpec is the specification of a ControllerInstallation.
+type ControllerInstallationSpec struct {
+ // RegistrationRef is used to reference a ControllerRegistration resource.
+ RegistrationRef corev1.ObjectReference `json:"registrationRef" protobuf:"bytes,1,opt,name=registrationRef"`
+ // SeedRef is used to reference a Seed resource.
+ SeedRef corev1.ObjectReference `json:"seedRef" protobuf:"bytes,2,opt,name=seedRef"`
+}
+
+// ControllerInstallationStatus is the status of a ControllerInstallation.
+type ControllerInstallationStatus struct {
+ // Conditions represents the latest available observations of a ControllerInstallation's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+ // ProviderStatus contains type-specific status.
+ // +optional
+ ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty" protobuf:"bytes,2,opt,name=providerStatus"`
+}
+
+const (
+ // ControllerInstallationHealthy is a condition type for indicating whether the controller is healthy.
+ ControllerInstallationHealthy ConditionType = "Healthy"
+ // ControllerInstallationInstalled is a condition type for indicating whether the controller has been installed.
+ ControllerInstallationInstalled ConditionType = "Installed"
+ // ControllerInstallationValid is a condition type for indicating whether the installation request is valid.
+ ControllerInstallationValid ConditionType = "Valid"
+ // ControllerInstallationRequired is a condition type for indicating that the respective extension controller is
+ // still required on the seed cluster as corresponding extension resources still exist.
+ ControllerInstallationRequired ConditionType = "Required"
+)
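
For illustration only, a small Go sketch of a ControllerInstallation built from the types above, binding a registration to a seed; the object, registration, and seed names are placeholders and not part of this patch:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

// exampleControllerInstallation requests installation of the referenced
// ControllerRegistration on the referenced Seed.
var exampleControllerInstallation = gardencorev1alpha1.ControllerInstallation{
	ObjectMeta: metav1.ObjectMeta{Name: "extension-foo-on-seed-bar"},
	Spec: gardencorev1alpha1.ControllerInstallationSpec{
		RegistrationRef: corev1.ObjectReference{Name: "extension-foo"},
		SeedRef:         corev1.ObjectReference{Name: "seed-bar"},
	},
}
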
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerregistration.go
new file mode 100644
index 0000000..2465795
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerregistration.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRegistration represents a registration of an external controller.
+type ControllerRegistration struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec contains the specification of this registration.
+ Spec ControllerRegistrationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRegistrationList is a collection of ControllerRegistrations.
+type ControllerRegistrationList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of ControllerRegistrations.
+ Items []ControllerRegistration `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ControllerRegistrationSpec is the specification of a ControllerRegistration.
+type ControllerRegistrationSpec struct {
+ // Resources is a list of combinations of kinds (DNSProvider, Infrastructure, Generic, ...) and their actual types
+ // (aws-route53, gcp, auditlog, ...).
+ // +optional
+ Resources []ControllerResource `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"`
+ // Deployment contains information for how this controller is deployed.
+ // +optional
+ Deployment *ControllerDeployment `json:"deployment,omitempty" protobuf:"bytes,2,opt,name=deployment"`
+}
+
+// ControllerResource is a combination of a kind (DNSProvider, Infrastructure, Generic, ...) and the actual type for this
+// kind (aws-route53, gcp, auditlog, ...).
+type ControllerResource struct {
+ // Kind is the resource kind, for example "OperatingSystemConfig".
+ Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+ // Type is the resource type, for example "coreos" or "ubuntu".
+ Type string `json:"type" protobuf:"bytes,2,opt,name=type"`
+ // GloballyEnabled determines if this ControllerResource is required by all Shoot clusters.
+ // +optional
+ GloballyEnabled *bool `json:"globallyEnabled,omitempty" protobuf:"varint,3,opt,name=globallyEnabled"`
+ // ReconcileTimeout defines how long Gardener should wait for the resource reconciliation.
+ // +optional
+ ReconcileTimeout *metav1.Duration `json:"reconcileTimeout,omitempty" protobuf:"bytes,4,opt,name=reconcileTimeout"`
+ // Primary determines if the controller backed by this ControllerRegistration is responsible for the extension
+ // resource's lifecycle. This field defaults to true. There must be exactly one primary controller for this kind/type
+ // combination.
+ // +optional
+ Primary *bool `json:"primary,omitempty" protobuf:"varint,5,opt,name=primary"`
+}
+
+// ControllerDeployment contains information for how this controller is deployed.
+type ControllerDeployment struct {
+ // Type is the deployment type.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // ProviderConfig contains type-specific configuration.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // Policy controls how the controller is deployed. It defaults to 'OnDemand'.
+ // +optional
+ Policy *ControllerDeploymentPolicy `json:"policy,omitempty" protobuf:"bytes,3,opt,name=policy"`
+ // SeedSelector contains an optional label selector for seeds. Only if the labels match will this controller be
+ // considered for deployment.
+ // An empty selector matches all seeds.
+ // +optional
+ SeedSelector *metav1.LabelSelector `json:"seedSelector,omitempty" protobuf:"bytes,4,opt,name=seedSelector"`
+}
+
+// ControllerDeploymentPolicy is a string alias.
+type ControllerDeploymentPolicy string
+
+const (
+ // ControllerDeploymentPolicyOnDemand specifies that the controller shall be only deployed if required by another
+ // resource. If nothing requires it then the controller shall not be deployed.
+ ControllerDeploymentPolicyOnDemand ControllerDeploymentPolicy = "OnDemand"
+ // ControllerDeploymentPolicyAlways specifies that the controller shall be deployed always, independent of whether
+ // another resource requires it or the respective seed has shoots.
+ ControllerDeploymentPolicyAlways ControllerDeploymentPolicy = "Always"
+ // ControllerDeploymentPolicyAlwaysExceptNoShoots specifies that the controller shall be deployed always, independent of
+ // whether another resource requires it, but only when the respective seed has at least one shoot.
+ ControllerDeploymentPolicyAlwaysExceptNoShoots ControllerDeploymentPolicy = "AlwaysExceptNoShoots"
+)
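
For illustration only, a Go sketch of a ControllerRegistration using the resource and deployment types above; the extension name, the "aws-route53" type, and the "helm" deployment type are placeholder assumptions, not part of this patch:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

// exampleControllerRegistration registers a hypothetical DNS extension and asks
// for it to be deployed on every seed, independent of whether a shoot needs it.
var exampleControllerRegistration = func() gardencorev1alpha1.ControllerRegistration {
	primary := true
	policy := gardencorev1alpha1.ControllerDeploymentPolicyAlways
	return gardencorev1alpha1.ControllerRegistration{
		ObjectMeta: metav1.ObjectMeta{Name: "extension-dns-aws"},
		Spec: gardencorev1alpha1.ControllerRegistrationSpec{
			Resources: []gardencorev1alpha1.ControllerResource{
				{Kind: "DNSProvider", Type: "aws-route53", Primary: &primary},
			},
			Deployment: &gardencorev1alpha1.ControllerDeployment{
				Type:   "helm",
				Policy: &policy,
			},
		},
	}
}()
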
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_plant.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_plant.go
new file mode 100644
index 0000000..26d891e
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_plant.go
@@ -0,0 +1,112 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Plant represents an external Kubernetes cluster that is registered with Gardener.
+type Plant struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec contains the specification of this Plant.
+ Spec PlantSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Status contains the status of this Plant.
+ Status PlantStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PlantList is a collection of Plants.
+type PlantList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of Plants.
+ Items []Plant `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+const (
+ // PlantEveryNodeReady is a constant for a condition type indicating the node health.
+ PlantEveryNodeReady ConditionType = "EveryNodeReady"
+ // PlantAPIServerAvailable is a constant for a condition type indicating that the Plant cluster API server is available.
+ PlantAPIServerAvailable ConditionType = "APIServerAvailable"
+)
+
+// PlantSpec is the specification of a Plant.
+type PlantSpec struct {
+ // SecretRef is a reference to a Secret object containing the kubeconfig of the external Kubernetes
+ // cluster to be added to Gardener.
+ SecretRef corev1.LocalObjectReference `json:"secretRef" protobuf:"bytes,1,opt,name=secretRef"`
+ // Endpoints is the configuration of the plant endpoints.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ Endpoints []Endpoint `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=endpoints"`
+}
+
+// PlantStatus is the status of a Plant.
+type PlantStatus struct {
+ // Conditions represents the latest available observations of a Plant's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+ // ObservedGeneration is the most recent generation observed for this Plant. It corresponds to the
+ // Plant's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,2,opt,name=observedGeneration"`
+ // ClusterInfo is additional computed information about the newly added cluster (Plant)
+ ClusterInfo *ClusterInfo `json:"clusterInfo,omitempty" protobuf:"bytes,3,opt,name=clusterInfo"`
+}
+
+// Endpoint is an endpoint for monitoring, logging and other services around the plant.
+type Endpoint struct {
+ // Name is the name of the endpoint
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // URL is the url of the endpoint
+ URL string `json:"url" protobuf:"bytes,2,opt,name=url"`
+ // Purpose is the purpose of the endpoint
+ Purpose string `json:"purpose" protobuf:"bytes,3,opt,name=purpose"`
+}
+
+// ClusterInfo contains information about the Plant cluster
+type ClusterInfo struct {
+ // Cloud describes the cloud information
+ Cloud CloudInfo `json:"cloud" protobuf:"bytes,1,opt,name=cloud"`
+ // Kubernetes describes kubernetes meta information (e.g., version)
+ Kubernetes KubernetesInfo `json:"kubernetes" protobuf:"bytes,2,opt,name=kubernetes"`
+}
+
+// CloudInfo contains information about the cloud
+type CloudInfo struct {
+ // Type is the cloud type
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // Region is the cloud region
+ Region string `json:"region" protobuf:"bytes,2,opt,name=region"`
+}
+
+// KubernetesInfo contains the version and configuration variables for the Plant cluster.
+type KubernetesInfo struct {
+ // Version is the semantic Kubernetes version to use for the Plant cluster.
+ Version string `json:"version" protobuf:"bytes,1,opt,name=version"`
+}
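
For illustration only, a Go sketch of a Plant that registers an external cluster via the secret holding its kubeconfig; the secret name, namespace, and endpoint URL are placeholders, not part of this patch:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

// examplePlant registers an external cluster whose kubeconfig lives in the
// referenced secret and exposes one monitoring endpoint.
var examplePlant = gardencorev1alpha1.Plant{
	ObjectMeta: metav1.ObjectMeta{Name: "my-external-cluster", Namespace: "garden-dev"},
	Spec: gardencorev1alpha1.PlantSpec{
		SecretRef: corev1.LocalObjectReference{Name: "my-external-cluster-kubeconfig"},
		Endpoints: []gardencorev1alpha1.Endpoint{
			{Name: "monitoring", URL: "https://grafana.example.com", Purpose: "monitoring"},
		},
	},
}
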
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_project.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_project.go
new file mode 100644
index 0000000..b09dee7
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_project.go
@@ -0,0 +1,174 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Project holds certain properties about a Gardener project.
+type Project struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec defines the project properties.
+ // +optional
+ Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Most recently observed status of the Project.
+ // +optional
+ Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ProjectList is a collection of Projects.
+type ProjectList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of Projects.
+ Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ProjectSpec is the specification of a Project.
+type ProjectSpec struct {
+ // CreatedBy is a subject representing a user name, an email address, or any other identifier of a user
+ // who created the project.
+ // +optional
+ CreatedBy *rbacv1.Subject `json:"createdBy,omitempty" protobuf:"bytes,1,opt,name=createdBy"`
+ // Description is a human-readable description of what the project is used for.
+ // +optional
+ Description *string `json:"description,omitempty" protobuf:"bytes,2,opt,name=description"`
+ // Owner is a subject representing a user name, an email address, or any other identifier of a user owning
+ // the project.
+ // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `owner`
+ // role. The only way to change the owner will be by moving the `owner` role. In this API version the only way
+ // to change the owner is to use this field.
+ // +optional
+ // TODO: Remove this field in favor of the `owner` role in `v1`.
+ Owner *rbacv1.Subject `json:"owner,omitempty" protobuf:"bytes,3,opt,name=owner"`
+ // Purpose is a human-readable explanation of the project's purpose.
+ // +optional
+ Purpose *string `json:"purpose,omitempty" protobuf:"bytes,4,opt,name=purpose"`
+ // Members is a list of subjects representing a user name, an email address, or any other identifier of a user,
+ // group, or service account that has a certain role.
+ // +optional
+ Members []ProjectMember `json:"members,omitempty" protobuf:"bytes,5,rep,name=members"`
+ // Namespace is the name of the namespace that has been created for the Project object.
+ // A nil value means that Gardener will determine the name of the namespace.
+ // +optional
+ Namespace *string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"`
+ // Tolerations contains the default tolerations and a whitelist for taints on seed clusters.
+ // +optional
+ Tolerations *ProjectTolerations `json:"tolerations,omitempty" protobuf:"bytes,7,opt,name=tolerations"`
+}
+
+// ProjectStatus holds the most recently observed status of the project.
+type ProjectStatus struct {
+ // ObservedGeneration is the most recent generation observed for this project.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+ // Phase is the current phase of the project.
+ Phase ProjectPhase `json:"phase,omitempty" protobuf:"bytes,2,opt,name=phase,casttype=ProjectPhase"`
+ // StaleSinceTimestamp contains the timestamp when the project was first discovered to be stale/unused.
+ // +optional
+ StaleSinceTimestamp *metav1.Time `json:"staleSinceTimestamp,omitempty" protobuf:"bytes,3,opt,name=staleSinceTimestamp"`
+ // StaleAutoDeleteTimestamp contains the timestamp when the project will be garbage-collected/automatically deleted
+ // because it's stale/unused.
+ // +optional
+ StaleAutoDeleteTimestamp *metav1.Time `json:"staleAutoDeleteTimestamp,omitempty" protobuf:"bytes,4,opt,name=staleAutoDeleteTimestamp"`
+}
+
+// ProjectMember is a member of a project.
+type ProjectMember struct {
+ // Subject represents a user name, an email address, or any other identifier of a user, group, or service
+ // account that has a certain role.
+ rbacv1.Subject `json:",inline" protobuf:"bytes,1,opt,name=subject"`
+ // Role represents the role of this member.
+ // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `roles`
+ // list.
+ // TODO: Remove this field in favor of the `roles` list in `v1`.
+ Role string `json:"role" protobuf:"bytes,2,opt,name=role"`
+ // Roles represents the list of roles of this member.
+ // +optional
+ Roles []string `json:"roles,omitempty" protobuf:"bytes,3,rep,name=roles"`
+}
+
+// ProjectTolerations contains the tolerations for taints on seed clusters.
+type ProjectTolerations struct {
+ // Defaults contains a list of tolerations that are added to the shoots in this project by default.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ Defaults []Toleration `json:"defaults,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,rep,name=defaults"`
+ // Whitelist contains a list of tolerations that are allowed to be added to the shoots in this project. Please note
+ // that entries may only be added to this list by users having the `spec-tolerations-whitelist` verb for project resources.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ Whitelist []Toleration `json:"whitelist,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,2,rep,name=whitelist"`
+}
+
+// Toleration is a toleration for a seed taint.
+type Toleration struct {
+ // Key is the toleration key to be applied to a project or shoot.
+ Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+ // Value is the toleration value corresponding to the toleration key.
+ // +optional
+ Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+}
+
+const (
+ // ProjectMemberAdmin is a const for a role that provides full admin access.
+ ProjectMemberAdmin = "admin"
+ // ProjectMemberOwner is a const for a role that provides full owner access.
+ ProjectMemberOwner = "owner"
+ // ProjectMemberViewer is a const for a role that provides limited permissions to only view some resources.
+ ProjectMemberViewer = "viewer"
+ // ProjectMemberUserAccessManager is a const for a role that provides permissions to manage human users and groups.
+ ProjectMemberUserAccessManager = "uam"
+ // ProjectMemberExtensionPrefix is a prefix for custom roles that are not known by Gardener.
+ ProjectMemberExtensionPrefix = "extension:"
+)
+
+// ProjectPhase is a label for the condition of a project at the current time.
+type ProjectPhase string
+
+const (
+ // ProjectPending indicates that the project reconciliation is pending.
+ ProjectPending ProjectPhase = "Pending"
+ // ProjectReady indicates that the project reconciliation was successful.
+ ProjectReady ProjectPhase = "Ready"
+ // ProjectFailed indicates that the project reconciliation failed.
+ ProjectFailed ProjectPhase = "Failed"
+ // ProjectTerminating indicates that the project is in termination process.
+ ProjectTerminating ProjectPhase = "Terminating"
+
+ // ProjectEventNamespaceReconcileFailed indicates that the namespace reconciliation has failed.
+ ProjectEventNamespaceReconcileFailed = "NamespaceReconcileFailed"
+ // ProjectEventNamespaceReconcileSuccessful indicates that the namespace reconciliation has succeeded.
+ ProjectEventNamespaceReconcileSuccessful = "NamespaceReconcileSuccessful"
+ // ProjectEventNamespaceDeletionFailed indicates that the namespace deletion failed.
+ ProjectEventNamespaceDeletionFailed = "NamespaceDeletionFailed"
+ // ProjectEventNamespaceMarkedForDeletion indicates that the namespace has been successfully marked for deletion.
+ ProjectEventNamespaceMarkedForDeletion = "NamespaceMarkedForDeletion"
+)
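
For illustration only, a Go sketch of a Project built from the types above, with an owner and one additional member that carries both the deprecated singular Role and the Roles list; all names and addresses are placeholders, not part of this patch:

package example

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

// exampleProject describes a development project owned by alice with bob as viewer.
var exampleProject = func() gardencorev1alpha1.Project {
	description := "Development landscape"
	namespace := "garden-dev"
	return gardencorev1alpha1.Project{
		ObjectMeta: metav1.ObjectMeta{Name: "dev"},
		Spec: gardencorev1alpha1.ProjectSpec{
			Description: &description,
			Namespace:   &namespace,
			Owner:       &rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.UserKind, Name: "alice@example.com"},
			Members: []gardencorev1alpha1.ProjectMember{
				{
					Subject: rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.UserKind, Name: "bob@example.com"},
					Role:    gardencorev1alpha1.ProjectMemberViewer,
					Roles:   []string{gardencorev1alpha1.ProjectMemberViewer},
				},
			},
		},
	}
}()
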
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_quota.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_quota.go
new file mode 100644
index 0000000..07269b2
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_quota.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Quota holds constraints for shoot clusters, such as a maximum cluster lifetime and resource metrics,
+// that apply within a certain scope ('project' or 'secret').
+type Quota struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec defines the Quota constraints.
+ // +optional
+ Spec QuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// QuotaList is a collection of Quotas.
+type QuotaList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of Quotas.
+ Items []Quota `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// QuotaSpec is the specification of a Quota.
+type QuotaSpec struct {
+ // ClusterLifetimeDays is the lifetime of a Shoot cluster in days before it will be terminated automatically.
+ // +optional
+ ClusterLifetimeDays *int32 `json:"clusterLifetimeDays,omitempty" protobuf:"varint,1,opt,name=clusterLifetimeDays"`
+ // Metrics is a list of resources which will be put under constraints.
+ Metrics corev1.ResourceList `json:"metrics" protobuf:"bytes,2,rep,name=metrics,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName"`
+ // Scope is the scope of the Quota object, either 'project' or 'secret'.
+ Scope corev1.ObjectReference `json:"scope" protobuf:"bytes,3,opt,name=scope"`
+}
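
For illustration only, a Go sketch of a Quota that limits cluster lifetime and caps two resource metrics; the object name, namespace, and the scope reference values are placeholder assumptions, not part of this patch:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

// exampleQuota terminates clusters after 14 days and constrains CPU and memory.
var exampleQuota = func() gardencorev1alpha1.Quota {
	lifetime := int32(14)
	return gardencorev1alpha1.Quota{
		ObjectMeta: metav1.ObjectMeta{Name: "trial-quota", Namespace: "garden-trial"},
		Spec: gardencorev1alpha1.QuotaSpec{
			ClusterLifetimeDays: &lifetime,
			Metrics: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("100"),
				corev1.ResourceMemory: resource.MustParse("200Gi"),
			},
			// Scope kind/apiVersion shown here are placeholders for a 'secret'-scoped quota.
			Scope: corev1.ObjectReference{APIVersion: "v1", Kind: "Secret"},
		},
	}
}()
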
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_secretbinding.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_secretbinding.go
new file mode 100644
index 0000000..a391b5c
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_secretbinding.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SecretBinding represents a binding to a secret in the same or another namespace, optionally
+// referencing Quota objects that apply to it.
+type SecretBinding struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // SecretRef is a reference to a secret object in the same or another namespace.
+ SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,2,opt,name=secretRef"`
+ // Quotas is a list of references to Quota objects in the same or another namespace.
+ // +optional
+ Quotas []corev1.ObjectReference `json:"quotas,omitempty" protobuf:"bytes,3,rep,name=quotas"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SecretBindingList is a collection of SecretBindings.
+type SecretBindingList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of SecretBindings.
+ Items []SecretBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
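
For illustration only, a Go sketch of a SecretBinding that points at credentials stored in another namespace and attaches a quota; all names and namespaces are placeholders, not part of this patch:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

// exampleSecretBinding binds a secret from garden-secrets into garden-dev and
// subjects shoots using it to the referenced quota.
var exampleSecretBinding = gardencorev1alpha1.SecretBinding{
	ObjectMeta: metav1.ObjectMeta{Name: "my-provider-account", Namespace: "garden-dev"},
	SecretRef:  corev1.SecretReference{Name: "my-provider-credentials", Namespace: "garden-secrets"},
	Quotas: []corev1.ObjectReference{
		{Name: "trial-quota", Namespace: "garden-trial"},
	},
}
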
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_seed.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_seed.go
new file mode 100644
index 0000000..25d8e7e
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_seed.go
@@ -0,0 +1,307 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Seed represents a cluster that hosts the control planes of Shoot clusters.
+type Seed struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec contains the specification of this seed.
+ Spec SeedSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Status contains the status of this seed.
+ Status SeedStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SeedList is a collection of Seeds.
+type SeedList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of Seeds.
+ Items []Seed `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// SeedSpec is the specification of a Seed.
+type SeedSpec struct {
+ // Backup holds the object store configuration for the backups of shoots (currently only etcd).
+ // If it is not specified, no backups are taken for shoots associated with this seed.
+ // If it is present, backups of the etcd of the shoot control planes are stored
+ // under the configured object store.
+ // +optional
+ Backup *SeedBackup `json:"backup,omitempty" protobuf:"bytes,1,opt,name=backup"`
+ // BlockCIDRs is a list of network addresses that should be blocked for shoot control plane components running
+ // in the seed cluster.
+ // +optional
+ BlockCIDRs []string `json:"blockCIDRs,omitempty" protobuf:"bytes,2,rep,name=blockCIDRs"`
+ // DNS contains DNS-relevant information about this seed cluster.
+ DNS SeedDNS `json:"dns" protobuf:"bytes,3,opt,name=dns"`
+ // Networks defines the pod, service and worker network of the Seed cluster.
+ Networks SeedNetworks `json:"networks" protobuf:"bytes,4,opt,name=networks"`
+ // Provider defines the provider type and region for this Seed cluster.
+ Provider SeedProvider `json:"provider" protobuf:"bytes,5,opt,name=provider"`
+ // SecretRef is a reference to a Secret object containing the Kubeconfig and the cloud provider credentials for
+ // the account the Seed cluster has been deployed to.
+ // +optional
+ SecretRef *corev1.SecretReference `json:"secretRef,omitempty" protobuf:"bytes,6,opt,name=secretRef"`
+ // Taints describes taints on the seed.
+ // +optional
+ Taints []SeedTaint `json:"taints,omitempty" protobuf:"bytes,7,rep,name=taints"`
+ // Volume contains settings for persistentvolumes created in the seed cluster.
+ // +optional
+ Volume *SeedVolume `json:"volume,omitempty" protobuf:"bytes,8,opt,name=volume"`
+ // Settings contains certain settings for this seed cluster.
+ // +optional
+ Settings *SeedSettings `json:"settings,omitempty" protobuf:"bytes,9,opt,name=settings"`
+ // Ingress configures Ingress specific settings of the Seed cluster.
+ // +optional
+ Ingress *Ingress `json:"ingress,omitempty" protobuf:"bytes,10,opt,name=ingress"`
+}
+
+// SeedStatus is the status of a Seed.
+type SeedStatus struct {
+ // Conditions represents the latest available observations of a Seed's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+ // Gardener holds information about the Gardener instance which last acted on the Seed.
+ // +optional
+ Gardener *Gardener `json:"gardener,omitempty" protobuf:"bytes,2,opt,name=gardener"`
+ // KubernetesVersion is the Kubernetes version of the seed cluster.
+ // +optional
+ KubernetesVersion *string `json:"kubernetesVersion,omitempty" protobuf:"bytes,3,opt,name=kubernetesVersion"`
+ // ObservedGeneration is the most recent generation observed for this Seed. It corresponds to the
+ // Seed's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,4,opt,name=observedGeneration"`
+ // ClusterIdentity is the identity of the Seed cluster
+ // +optional
+ ClusterIdentity *string `json:"clusterIdentity,omitempty" protobuf:"bytes,5,opt,name=clusterIdentity"`
+}
+
+// SeedBackup contains the object store configuration for shoot backups (currently only etcd).
+type SeedBackup struct {
+ // Provider is a provider name.
+ Provider string `json:"provider" protobuf:"bytes,1,opt,name=provider"`
+ // ProviderConfig is the configuration passed to BackupBucket resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // Region is a region name.
+ // +optional
+ Region *string `json:"region,omitempty" protobuf:"bytes,3,opt,name=region"`
+ // SecretRef is a reference to a Secret object containing the cloud provider credentials for
+ // the object store where backups should be stored. It should have enough privileges to manipulate
+ // the objects as well as buckets.
+ SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,4,opt,name=secretRef"`
+}
+
+// SeedDNS contains DNS-relevant information about this seed cluster.
+type SeedDNS struct {
+ // IngressDomain is the domain of the Seed cluster pointing to the ingress controller endpoint. It will be used
+ // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable.
+ // This will be removed in the next API version and replaced by spec.ingress.domain.
+ // +optional
+ IngressDomain *string `json:"ingressDomain,omitempty" protobuf:"bytes,1,opt,name=ingressDomain"`
+ // Provider configures a DNSProvider
+ // +optional
+ Provider *SeedDNSProvider `json:"provider,omitempty" protobuf:"bytes,2,opt,name=provider"`
+}
+
+// SeedDNSProvider configures a DNSProvider
+type SeedDNSProvider struct {
+ // Type describes the type of the dns-provider, for example `aws-route53`
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // SecretRef is a reference to a Secret object containing cloud provider credentials used for registering external domains.
+ SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,2,opt,name=secretRef"`
+ // Domains contains information about which domains shall be included/excluded for this provider.
+ // +optional
+ Domains *DNSIncludeExclude `json:"domains,omitempty" protobuf:"bytes,3,opt,name=domains"`
+ // Zones contains information about which hosted zones shall be included/excluded for this provider.
+ // +optional
+ Zones *DNSIncludeExclude `json:"zones,omitempty" protobuf:"bytes,4,opt,name=zones"`
+}
+
+// Ingress configures the Ingress specific settings of the Seed cluster.
+type Ingress struct {
+ // Domain specifies the IngressDomain of the Seed cluster pointing to the ingress controller endpoint. It will be used
+ // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable.
+ Domain string `json:"domain" protobuf:"bytes,1,opt,name=domain"`
+ // Controller configures a Gardener managed Ingress Controller listening on the ingressDomain
+ Controller IngressController `json:"controller" protobuf:"bytes,2,opt,name=controller"`
+}
+
+// IngressController enables a Gardener managed Ingress Controller listening on the ingressDomain
+type IngressController struct {
+ // Kind defines which kind of IngressController to use, for example `nginx`
+ Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+ // ProviderConfig specifies infrastructure specific configuration for the ingressController
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+}
+
+// SeedNetworks contains CIDRs for the pod, service and node networks of a Kubernetes cluster.
+type SeedNetworks struct {
+ // Nodes is the CIDR of the node network.
+ // +optional
+ Nodes *string `json:"nodes,omitempty" protobuf:"bytes,1,opt,name=nodes"`
+ // Pods is the CIDR of the pod network.
+ Pods string `json:"pods" protobuf:"bytes,2,opt,name=pods"`
+ // Services is the CIDR of the service network.
+ Services string `json:"services" protobuf:"bytes,3,opt,name=services"`
+ // ShootDefaults contains the default networks CIDRs for shoots.
+ // +optional
+ ShootDefaults *ShootNetworks `json:"shootDefaults,omitempty" protobuf:"bytes,4,opt,name=shootDefaults"`
+}
+
+// ShootNetworks contains the default networks CIDRs for shoots.
+type ShootNetworks struct {
+ // Pods is the CIDR of the pod network.
+ // +optional
+ Pods *string `json:"pods,omitempty" protobuf:"bytes,1,opt,name=pods"`
+ // Services is the CIDR of the service network.
+ // +optional
+ Services *string `json:"services,omitempty" protobuf:"bytes,2,opt,name=services"`
+}
+
+// SeedProvider defines the provider type and region for this Seed cluster.
+type SeedProvider struct {
+ // Type is the name of the provider.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // ProviderConfig is the configuration passed to Seed resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // Region is a name of a region.
+ Region string `json:"region" protobuf:"bytes,3,opt,name=region"`
+}
+
+// SeedSettings contains certain settings for this seed cluster.
+type SeedSettings struct {
+ // ExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the seed.
+ // +optional
+ ExcessCapacityReservation *SeedSettingExcessCapacityReservation `json:"excessCapacityReservation,omitempty" protobuf:"bytes,1,opt,name=excessCapacityReservation"`
+ // Scheduling controls settings for scheduling decisions for the seed.
+ // +optional
+ Scheduling *SeedSettingScheduling `json:"scheduling,omitempty" protobuf:"bytes,2,opt,name=scheduling"`
+ // ShootDNS controls the shoot DNS settings for the seed.
+ // +optional
+ ShootDNS *SeedSettingShootDNS `json:"shootDNS,omitempty" protobuf:"bytes,3,opt,name=shootDNS"`
+ // LoadBalancerServices controls certain settings for services of type load balancer that are created in the
+ // seed.
+ // +optional
+ LoadBalancerServices *SeedSettingLoadBalancerServices `json:"loadBalancerServices,omitempty" protobuf:"bytes,4,opt,name=loadBalancerServices"`
+ // VerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the seed.
+ // +optional
+ VerticalPodAutoscaler *SeedSettingVerticalPodAutoscaler `json:"verticalPodAutoscaler,omitempty" protobuf:"bytes,5,opt,name=verticalPodAutoscaler"`
+}
+
+// SeedSettingExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the
+// seed. When enabled, this is done via PodPriority and requires the seed cluster to run at least Kubernetes 1.11
+// or to have the PodPriority feature gate as well as the scheduling.k8s.io/v1alpha1 API group enabled.
+type SeedSettingExcessCapacityReservation struct {
+ // Enabled controls whether the excess capacity reservation should be enabled.
+ Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"`
+}
+
+// SeedSettingShootDNS controls the shoot DNS settings for the seed.
+type SeedSettingShootDNS struct {
+ // Enabled controls whether the DNS for shoot clusters should be enabled. When disabled, shoots using the
+ // seed do not get any DNS providers or DNS records, and no DNS extension controller needs to be installed here.
+ // This is useful for environments where DNS is not required.
+ Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"`
+}
+
+// SeedSettingScheduling controls settings for scheduling decisions for the seed.
+type SeedSettingScheduling struct {
+ // Visible controls whether the gardener-scheduler shall consider this seed when scheduling shoots. Invisible seeds
+ // are not considered by the scheduler.
+ Visible bool `json:"visible" protobuf:"bytes,1,opt,name=visible"`
+}
+
+// SeedSettingLoadBalancerServices controls certain settings for services of type load balancer that are created in the
+// seed.
+type SeedSettingLoadBalancerServices struct {
+ // Annotations is a map of annotations that will be injected/merged into every load balancer service object.
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,1,rep,name=annotations"`
+}
+
+// SeedSettingVerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the
+// seed.
+type SeedSettingVerticalPodAutoscaler struct {
+ // Enabled controls whether the VPA components shall be deployed into the garden namespace in the seed cluster. It
+ // is enabled by default because Gardener heavily relies on a VPA being deployed. You should only disable this if
+ // your seed cluster already has another, manually/custom managed VPA deployment.
+ Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"`
+}
+
+// SeedTaint describes a taint on a seed.
+type SeedTaint struct {
+ // Key is the taint key to be applied to a seed.
+ Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+ // Value is the taint value corresponding to the taint key.
+ // +optional
+ Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+}
+
+const (
+ // SeedTaintProtected is a constant for a taint key on a seed that marks it as protected. Protected seeds
+ // may only be used by shoots in the `garden` namespace.
+ SeedTaintProtected = "seed.gardener.cloud/protected"
+)
+
+// SeedVolume contains settings for persistentvolumes created in the seed cluster.
+type SeedVolume struct {
+ // MinimumSize defines the minimum size that should be used for PVCs in the seed.
+ // +optional
+ MinimumSize *resource.Quantity `json:"minimumSize,omitempty" protobuf:"bytes,1,opt,name=minimumSize"`
+ // Providers is a list of storage class provisioner types for the seed.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ Providers []SeedVolumeProvider `json:"providers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=providers"`
+}
+
+// SeedVolumeProvider is a storage class provisioner type.
+type SeedVolumeProvider struct {
+ // Purpose is the purpose of this provider.
+ Purpose string `json:"purpose" protobuf:"bytes,1,opt,name=purpose"`
+ // Name is the name of the storage class provisioner type.
+ Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+}
+
+const (
+ // SeedBootstrapped is a constant for a condition type indicating that the seed cluster has been
+ // bootstrapped.
+ SeedBootstrapped ConditionType = "Bootstrapped"
+ // SeedExtensionsReady is a constant for a condition type indicating that the extensions are ready.
+ SeedExtensionsReady ConditionType = "ExtensionsReady"
+ // SeedGardenletReady is a constant for a condition type indicating that the Gardenlet is ready.
+ SeedGardenletReady ConditionType = "GardenletReady"
+)
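
For illustration only, a Go sketch of a minimal Seed assembled from the types above, with networks, provider, a DNS ingress domain, and the protected taint; all concrete names, CIDRs, and domains are placeholders, not part of this patch:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

// exampleSeed describes a protected AWS seed with its networks and kubeconfig secret.
var exampleSeed = func() gardencorev1alpha1.Seed {
	nodes := "10.250.0.0/16"
	ingressDomain := "ingress.seed.example.com"
	return gardencorev1alpha1.Seed{
		ObjectMeta: metav1.ObjectMeta{Name: "seed-eu1"},
		Spec: gardencorev1alpha1.SeedSpec{
			DNS:       gardencorev1alpha1.SeedDNS{IngressDomain: &ingressDomain},
			Networks:  gardencorev1alpha1.SeedNetworks{Nodes: &nodes, Pods: "100.96.0.0/11", Services: "100.64.0.0/13"},
			Provider:  gardencorev1alpha1.SeedProvider{Type: "aws", Region: "eu-west-1"},
			SecretRef: &corev1.SecretReference{Name: "seed-eu1-kubeconfig", Namespace: "garden"},
			Taints:    []gardencorev1alpha1.SeedTaint{{Key: gardencorev1alpha1.SeedTaintProtected}},
		},
	}
}()
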
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shoot.go
new file mode 100644
index 0000000..59dbd52
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shoot.go
@@ -0,0 +1,1181 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ "time"
+
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Shoot represents a Shoot cluster created and managed by Gardener.
+type Shoot struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the Shoot cluster.
+ // +optional
+ Spec ShootSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Most recently observed status of the Shoot cluster.
+ // +optional
+ Status ShootStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ShootList is a list of Shoot objects.
+type ShootList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of Shoots.
+ Items []Shoot `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ShootSpec is the specification of a Shoot.
+type ShootSpec struct {
+ // Addons contains information about enabled/disabled addons and their configuration.
+ // +optional
+ Addons *Addons `json:"addons,omitempty" protobuf:"bytes,1,opt,name=addons"`
+ // CloudProfileName is a name of a CloudProfile object.
+ CloudProfileName string `json:"cloudProfileName" protobuf:"bytes,2,opt,name=cloudProfileName"`
+ // DNS contains information about the DNS settings of the Shoot.
+ // +optional
+ DNS *DNS `json:"dns,omitempty" protobuf:"bytes,3,opt,name=dns"`
+ // Extensions contain type and provider information for Shoot extensions.
+ // +optional
+ Extensions []Extension `json:"extensions,omitempty" protobuf:"bytes,4,rep,name=extensions"`
+ // Hibernation contains information whether the Shoot is suspended or not.
+ // +optional
+ Hibernation *Hibernation `json:"hibernation,omitempty" protobuf:"bytes,5,opt,name=hibernation"`
+ // Kubernetes contains the version and configuration settings of the control plane components.
+ Kubernetes Kubernetes `json:"kubernetes" protobuf:"bytes,6,opt,name=kubernetes"`
+ // Networking contains information about cluster networking such as CNI Plugin type, CIDRs, ...etc.
+ Networking Networking `json:"networking" protobuf:"bytes,7,opt,name=networking"`
+ // Maintenance contains information about the time window for maintenance operations and which
+ // operations should be performed.
+ // +optional
+ Maintenance *Maintenance `json:"maintenance,omitempty" protobuf:"bytes,8,opt,name=maintenance"`
+ // Monitoring contains information about custom monitoring configurations for the shoot.
+ // +optional
+ Monitoring *Monitoring `json:"monitoring,omitempty" protobuf:"bytes,9,opt,name=monitoring"`
+ // Provider contains all provider-specific and provider-relevant information.
+ Provider Provider `json:"provider" protobuf:"bytes,10,opt,name=provider"`
+ // Purpose is the purpose class for this cluster.
+ // +optional
+ Purpose *ShootPurpose `json:"purpose,omitempty" protobuf:"bytes,11,opt,name=purpose"`
+ // Region is a name of a region.
+ Region string `json:"region" protobuf:"bytes,12,opt,name=region"`
+ // SecretBindingName is the name of a SecretBinding that has a reference to the provider secret.
+ // The credentials inside the provider secret will be used to create the shoot in the respective account.
+ SecretBindingName string `json:"secretBindingName" protobuf:"bytes,13,opt,name=secretBindingName"`
+ // SeedName is the name of the seed cluster that runs the control plane of the Shoot.
+ // +optional
+ SeedName *string `json:"seedName,omitempty" protobuf:"bytes,14,opt,name=seedName"`
+ // SeedSelector is an optional selector which must match a seed's labels for the shoot to be scheduled on that seed.
+ // +optional
+ SeedSelector *SeedSelector `json:"seedSelector,omitempty" protobuf:"bytes,15,opt,name=seedSelector"`
+ // Resources holds a list of named resource references that can be referred to in extension configs by their names.
+ // +optional
+ Resources []NamedResourceReference `json:"resources,omitempty" protobuf:"bytes,16,rep,name=resources"`
+ // Tolerations contains the tolerations for taints on seed clusters.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ Tolerations []Toleration `json:"tolerations,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,17,rep,name=tolerations"`
+}
+
+// ShootStatus holds the most recently observed status of the Shoot cluster.
+type ShootStatus struct {
+ // Conditions represents the latest available observations of a Shoot's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+ // Constraints represents conditions of a Shoot's current state that constrain some operations on it.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Constraints []Condition `json:"constraints,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=constraints"`
+ // Gardener holds information about the Gardener which last acted on the Shoot.
+ Gardener Gardener `json:"gardener" protobuf:"bytes,3,opt,name=gardener"`
+ // IsHibernated indicates whether the Shoot is currently hibernated.
+ IsHibernated bool `json:"hibernated" protobuf:"varint,4,opt,name=hibernated"`
+ // LastOperation holds information about the last operation on the Shoot.
+ // +optional
+ LastOperation *LastOperation `json:"lastOperation,omitempty" protobuf:"bytes,5,opt,name=lastOperation"`
+ // LastError holds information about the last occurred error during an operation.
+ // +optional
+ LastError *LastError `json:"lastError,omitempty" protobuf:"bytes,6,opt,name=lastError"`
+ // LastErrors holds information about the last occurred error(s) during an operation.
+ // +optional
+ LastErrors []LastError `json:"lastErrors,omitempty" protobuf:"bytes,7,rep,name=lastErrors"`
+ // ObservedGeneration is the most recent generation observed for this Shoot. It corresponds to the
+ // Shoot's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,8,opt,name=observedGeneration"`
+ // RetryCycleStartTime is the start time of the last retry cycle (used to determine how often an operation
+ // must be retried until we give up).
+ // +optional
+ RetryCycleStartTime *metav1.Time `json:"retryCycleStartTime,omitempty" protobuf:"bytes,9,opt,name=retryCycleStartTime"`
+ // Seed is the name of the seed cluster that runs the control plane of the Shoot. This value is only written
+ // after a successful create/reconcile operation. It will be used when control planes are moved between Seeds.
+ // +optional
+ Seed *string `json:"seed,omitempty" protobuf:"bytes,10,opt,name=seed"`
+ // TechnicalID is the name that is used for creating the Seed namespace, the infrastructure resources, and
+ // basically everything that is related to this particular Shoot.
+ TechnicalID string `json:"technicalID" protobuf:"bytes,11,opt,name=technicalID"`
+ // UID is a unique identifier for the Shoot cluster to avoid portability between Kubernetes clusters.
+ // It is used to compute unique hashes.
+ UID types.UID `json:"uid" protobuf:"bytes,12,opt,name=uid"`
+ // ClusterIdentity is the identity of the Shoot cluster
+ // +optional
+ ClusterIdentity *string `json:"clusterIdentity,omitempty" protobuf:"bytes,13,opt,name=clusterIdentity"`
+}
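
For illustration only, a Go sketch of a Shoot that sets just the spec fields shown above; the Kubernetes, Networking, and Provider sections (defined further below in this file) are left at their zero values here and would have to be filled in for a real cluster, and all names are placeholders, not part of this patch:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

// exampleShoot references a cloud profile, a region, and a secret binding only;
// the remaining required sections are intentionally omitted in this sketch.
var exampleShoot = gardencorev1alpha1.Shoot{
	ObjectMeta: metav1.ObjectMeta{Name: "my-shoot", Namespace: "garden-dev"},
	Spec: gardencorev1alpha1.ShootSpec{
		CloudProfileName:  "aws",
		Region:            "eu-west-1",
		SecretBindingName: "my-provider-account",
	},
}
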
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Addons relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Addons is a collection of configuration for specific addons which are managed by the Gardener.
+type Addons struct {
+ // KubernetesDashboard holds configuration settings for the kubernetes dashboard addon.
+ // +optional
+ KubernetesDashboard *KubernetesDashboard `json:"kubernetes-dashboard,omitempty" protobuf:"bytes,1,opt,name=kubernetesDashboard"`
+ // NginxIngress holds configuration settings for the nginx-ingress addon.
+ // +optional
+ NginxIngress *NginxIngress `json:"nginx-ingress,omitempty" protobuf:"bytes,2,opt,name=nginxIngress"`
+}
+
+// Addon allows enabling or disabling a specific addon and is embedded into the concrete addon types.
+type Addon struct {
+ // Enabled indicates whether the addon is enabled or not.
+ Enabled bool `json:"enabled" protobuf:"varint,1,opt,name=enabled"`
+}
+
+// KubernetesDashboard describes configuration values for the kubernetes-dashboard addon.
+type KubernetesDashboard struct {
+ Addon `json:",inline" protobuf:"bytes,2,opt,name=addon"`
+ // AuthenticationMode defines the authentication mode for the kubernetes-dashboard.
+ // +optional
+ AuthenticationMode *string `json:"authenticationMode,omitempty" protobuf:"bytes,1,opt,name=authenticationMode"`
+}
+
+const (
+ // KubernetesDashboardAuthModeBasic uses basic authentication mode for auth.
+ KubernetesDashboardAuthModeBasic = "basic"
+ // KubernetesDashboardAuthModeToken uses token-based mode for auth.
+ KubernetesDashboardAuthModeToken = "token"
+)
+
+// NginxIngress describes configuration values for the nginx-ingress addon.
+type NginxIngress struct {
+ Addon `json:",inline" protobuf:"bytes,4,opt,name=addon"`
+ // LoadBalancerSourceRanges is list of allowed IP sources for NginxIngress
+ // +optional
+ LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,1,rep,name=loadBalancerSourceRanges"`
+ // Config contains custom configuration for the nginx-ingress-controller configuration.
+ // See https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#configuration-options
+ // +optional
+ Config map[string]string `json:"config,omitempty" protobuf:"bytes,2,rep,name=config"`
+ // ExternalTrafficPolicy controls the `.spec.externalTrafficPolicy` value of the load balancer `Service`
+ // exposing the nginx-ingress. Defaults to `Cluster`.
+ // +optional
+ ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,3,opt,name=externalTrafficPolicy,casttype=k8s.io/api/core/v1.ServiceExternalTrafficPolicyType"`
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// DNS relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// DNS holds information about the provider, the hosted zone id and the domain.
+type DNS struct {
+ // Domain is the external available domain of the Shoot cluster. This domain will be written into the
+ // kubeconfig that is handed out to end-users. Once set it is immutable.
+ // +optional
+ Domain *string `json:"domain,omitempty" protobuf:"bytes,1,opt,name=domain"`
+ // Providers is a list of DNS providers that shall be enabled for this shoot cluster. Only relevant if
+ // a default domain is not used.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Providers []DNSProvider `json:"providers,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=providers"`
+}
+
+// DNSProvider contains information about a DNS provider.
+type DNSProvider struct {
+ // Domains contains information about which domains shall be included/excluded for this provider.
+ // +optional
+ Domains *DNSIncludeExclude `json:"domains,omitempty" protobuf:"bytes,1,opt,name=domains"`
+ // Primary indicates that this DNSProvider is used for shoot related domains.
+ // +optional
+ Primary *bool `json:"primary,omitempty" protobuf:"varint,2,opt,name=primary"`
+ // SecretName is a name of a secret containing credentials for the stated domain and the
+ // provider. When not specified, the Gardener will use the cloud provider credentials referenced
+ // by the Shoot and try to find respective credentials there (primary provider only). Specifying this field may override
+ // this behavior, i.e. forcing the Gardener to only look into the given secret.
+ // +optional
+ SecretName *string `json:"secretName,omitempty" protobuf:"bytes,3,opt,name=secretName"`
+ // Type is the DNS provider type.
+ // +optional
+ Type *string `json:"type,omitempty" protobuf:"bytes,4,opt,name=type"`
+ // Zones contains information about which hosted zones shall be included/excluded for this provider.
+ // +optional
+ Zones *DNSIncludeExclude `json:"zones,omitempty" protobuf:"bytes,5,opt,name=zones"`
+}
+
+// DNSIncludeExclude contains information about which domains shall be included/excluded.
+type DNSIncludeExclude struct {
+ // Include is a list of resources that shall be included.
+ // +optional
+ Include []string `json:"include,omitempty" protobuf:"bytes,1,rep,name=include"`
+ // Exclude is a list of resources that shall be excluded.
+ // +optional
+ Exclude []string `json:"exclude,omitempty" protobuf:"bytes,2,rep,name=exclude"`
+}
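+
+// Example (editor's illustrative sketch; the domain, provider type and secret
+// name are hypothetical values, not API defaults): a DNS section with a single
+// primary provider could be built like this:
+//
+//    domain := "my-shoot.example.com"
+//    providerType := "aws-route53"
+//    secretName := "my-dns-credentials"
+//    primary := true
+//    dns := DNS{
+//        Domain: &domain,
+//        Providers: []DNSProvider{{
+//            Type:       &providerType,
+//            SecretName: &secretName,
+//            Primary:    &primary,
+//        }},
+//    }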
+
+// DefaultDomain is the default value in the Shoot's '.spec.dns.domain' when '.spec.dns.provider' is 'unmanaged'
+const DefaultDomain = "cluster.local"
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Extension relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Extension contains type and provider information for Shoot extensions.
+type Extension struct {
+ // Type is the type of the extension resource.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // ProviderConfig is the configuration passed to extension resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // Disabled allows to disable extensions that were marked as 'globally enabled' by Gardener administrators.
+ // +optional
+ Disabled *bool `json:"disabled,omitempty" protobuf:"varint,3,opt,name=disabled"`
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// NamedResourceReference relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// NamedResourceReference is a named reference to a resource.
+type NamedResourceReference struct {
+ // Name of the resource reference.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // ResourceRef is a reference to a resource.
+ ResourceRef autoscalingv1.CrossVersionObjectReference `json:"resourceRef" protobuf:"bytes,2,opt,name=resourceRef"`
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Hibernation relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Hibernation contains information whether the Shoot is suspended or not.
+type Hibernation struct {
+ // Enabled specifies whether the Shoot needs to be hibernated or not. If it is true, the Shoot's desired state is to be hibernated.
+ // If it is false or nil, the Shoot's desired state is to be awakened.
+ // +optional
+ Enabled *bool `json:"enabled,omitempty" protobuf:"varint,1,opt,name=enabled"`
+ // Schedules determine the hibernation schedules.
+ // +optional
+ Schedules []HibernationSchedule `json:"schedules,omitempty" protobuf:"bytes,2,rep,name=schedules"`
+}
+
+// HibernationSchedule determines the hibernation schedule of a Shoot.
+// A Shoot will be regularly hibernated at each start time and will be woken up at each end time.
+// Start or End can be omitted, though at least one of them has to be specified.
+type HibernationSchedule struct {
+ // Start is a Cron spec at which time a Shoot will be hibernated.
+ // +optional
+ Start *string `json:"start,omitempty" protobuf:"bytes,1,opt,name=start"`
+ // End is a Cron spec at which time a Shoot will be woken up.
+ // +optional
+ End *string `json:"end,omitempty" protobuf:"bytes,2,opt,name=end"`
+ // Location is the time location in which both start and end shall be evaluated.
+ // +optional
+ Location *string `json:"location,omitempty" protobuf:"bytes,3,opt,name=location"`
+}
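+
+// Example (editor's illustrative sketch; the cron expressions and time zone are
+// arbitrary): hibernate the Shoot on weekday evenings and wake it up again on
+// weekday mornings:
+//
+//    start := "00 19 * * 1,2,3,4,5"
+//    end := "00 07 * * 1,2,3,4,5"
+//    location := "Europe/Berlin"
+//    hibernation := Hibernation{
+//        Schedules: []HibernationSchedule{{Start: &start, End: &end, Location: &location}},
+//    }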
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Kubernetes relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Kubernetes contains the version and configuration variables for the Shoot control plane.
+type Kubernetes struct {
+ // AllowPrivilegedContainers indicates whether privileged containers are allowed in the Shoot (default: true).
+ // +optional
+ AllowPrivilegedContainers *bool `json:"allowPrivilegedContainers,omitempty" protobuf:"varint,1,opt,name=allowPrivilegedContainers"`
+ // ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler.
+ // +optional
+ ClusterAutoscaler *ClusterAutoscaler `json:"clusterAutoscaler,omitempty" protobuf:"bytes,2,opt,name=clusterAutoscaler"`
+ // KubeAPIServer contains configuration settings for the kube-apiserver.
+ // +optional
+ KubeAPIServer *KubeAPIServerConfig `json:"kubeAPIServer,omitempty" protobuf:"bytes,3,opt,name=kubeAPIServer"`
+ // KubeControllerManager contains configuration settings for the kube-controller-manager.
+ // +optional
+ KubeControllerManager *KubeControllerManagerConfig `json:"kubeControllerManager,omitempty" protobuf:"bytes,4,opt,name=kubeControllerManager"`
+ // KubeScheduler contains configuration settings for the kube-scheduler.
+ // +optional
+ KubeScheduler *KubeSchedulerConfig `json:"kubeScheduler,omitempty" protobuf:"bytes,5,opt,name=kubeScheduler"`
+ // KubeProxy contains configuration settings for the kube-proxy.
+ // +optional
+ KubeProxy *KubeProxyConfig `json:"kubeProxy,omitempty" protobuf:"bytes,6,opt,name=kubeProxy"`
+ // Kubelet contains configuration settings for the kubelet.
+ // +optional
+ Kubelet *KubeletConfig `json:"kubelet,omitempty" protobuf:"bytes,7,opt,name=kubelet"`
+ // Version is the semantic Kubernetes version to use for the Shoot cluster.
+ Version string `json:"version" protobuf:"bytes,8,opt,name=version"`
+ // VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler.
+ // +optional
+ VerticalPodAutoscaler *VerticalPodAutoscaler `json:"verticalPodAutoscaler,omitempty" protobuf:"bytes,9,opt,name=verticalPodAutoscaler"`
+}
+
+// ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler.
+type ClusterAutoscaler struct {
+ // ScaleDownDelayAfterAdd defines how long after scale up that scale down evaluation resumes (default: 1 hour).
+ // +optional
+ ScaleDownDelayAfterAdd *metav1.Duration `json:"scaleDownDelayAfterAdd,omitempty" protobuf:"bytes,1,opt,name=scaleDownDelayAfterAdd"`
+ // ScaleDownDelayAfterDelete defines how long after node deletion scale down evaluation resumes (defaults to ScanInterval).
+ // +optional
+ ScaleDownDelayAfterDelete *metav1.Duration `json:"scaleDownDelayAfterDelete,omitempty" protobuf:"bytes,2,opt,name=scaleDownDelayAfterDelete"`
+ // ScaleDownDelayAfterFailure defines how long after a scale down failure scale down evaluation resumes (default: 3 mins).
+ // +optional
+ ScaleDownDelayAfterFailure *metav1.Duration `json:"scaleDownDelayAfterFailure,omitempty" protobuf:"bytes,3,opt,name=scaleDownDelayAfterFailure"`
+ // ScaleDownUnneededTime defines how long a node should be unneeded before it is eligible for scale down (default: 30 mins).
+ // +optional
+ ScaleDownUnneededTime *metav1.Duration `json:"scaleDownUnneededTime,omitempty" protobuf:"bytes,4,opt,name=scaleDownUnneededTime"`
+ // ScaleDownUtilizationThreshold defines the utilization threshold (fraction between 0.0 and 1.0) under which a node is being removed.
+ // +optional
+ ScaleDownUtilizationThreshold *float64 `json:"scaleDownUtilizationThreshold,omitempty" protobuf:"fixed64,5,opt,name=scaleDownUtilizationThreshold"`
+ // ScanInterval defines how often the cluster is reevaluated for scale up or down (default: 10 secs).
+ // +optional
+ ScanInterval *metav1.Duration `json:"scanInterval,omitempty" protobuf:"bytes,6,opt,name=scanInterval"`
+}
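+
+// Example (editor's illustrative sketch; the unneeded time mirrors the documented
+// default, the threshold is an arbitrary fraction): overriding the scale-down
+// behaviour of the cluster autoscaler:
+//
+//    unneededTime := metav1.Duration{Duration: 30 * time.Minute}
+//    utilizationThreshold := 0.5
+//    autoscaler := ClusterAutoscaler{
+//        ScaleDownUnneededTime:         &unneededTime,
+//        ScaleDownUtilizationThreshold: &utilizationThreshold,
+//    }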
+
+// VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler.
+type VerticalPodAutoscaler struct {
+ // Enabled specifies whether the Kubernetes VPA shall be enabled for the shoot cluster.
+ Enabled bool `json:"enabled" protobuf:"varint,1,opt,name=enabled"`
+ // EvictAfterOOMThreshold defines the threshold that will lead to pod eviction if the pod OOMed in less than the given
+ // threshold since its start and it has only one container (default: 10m0s).
+ // +optional
+ EvictAfterOOMThreshold *metav1.Duration `json:"evictAfterOOMThreshold,omitempty" protobuf:"bytes,2,opt,name=evictAfterOOMThreshold"`
+ // EvictionRateBurst defines the burst of pods that can be evicted (default: 1)
+ // +optional
+ EvictionRateBurst *int32 `json:"evictionRateBurst,omitempty" protobuf:"varint,3,opt,name=evictionRateBurst"`
+ // EvictionRateLimit defines the number of pods that can be evicted per second. A rate limit set to 0 or -1 will
+ // disable the rate limiter (default: -1).
+ // +optional
+ EvictionRateLimit *float64 `json:"evictionRateLimit,omitempty" protobuf:"fixed64,4,opt,name=evictionRateLimit"`
+ // EvictionTolerance defines the fraction of replica count that can be evicted for update in case more than one
+ // pod can be evicted (default: 0.5).
+ // +optional
+ EvictionTolerance *float64 `json:"evictionTolerance,omitempty" protobuf:"fixed64,5,opt,name=evictionTolerance"`
+ // RecommendationMarginFraction is the fraction of usage added as the safety margin to the recommended request
+ // (default: 0.15).
+ // +optional
+ RecommendationMarginFraction *float64 `json:"recommendationMarginFraction,omitempty" protobuf:"fixed64,6,opt,name=recommendationMarginFraction"`
+ // UpdaterInterval is the interval at which the updater runs (default: 1m0s).
+ // +optional
+ UpdaterInterval *metav1.Duration `json:"updaterInterval,omitempty" protobuf:"bytes,7,opt,name=updaterInterval"`
+ // RecommenderInterval is the interval at which metrics are fetched (default: 1m0s).
+ // +optional
+ RecommenderInterval *metav1.Duration `json:"recommenderInterval,omitempty" protobuf:"bytes,8,opt,name=recommenderInterval"`
+}
+
+const (
+ // DefaultEvictionRateBurst is the default value for the EvictionRateBurst field in the VPA configuration.
+ DefaultEvictionRateBurst int32 = 1
+ // DefaultEvictionRateLimit is the default value for the EvictionRateLimit field in the VPA configuration.
+ DefaultEvictionRateLimit float64 = -1
+ // DefaultEvictionTolerance is the default value for the EvictionTolerance field in the VPA configuration.
+ DefaultEvictionTolerance = 0.5
+ // DefaultRecommendationMarginFraction is the default value for the RecommendationMarginFraction field in the VPA configuration.
+ DefaultRecommendationMarginFraction = 0.15
+)
+
+var (
+ // DefaultEvictAfterOOMThreshold is the default value for the EvictAfterOOMThreshold field in the VPA configuration.
+ DefaultEvictAfterOOMThreshold = metav1.Duration{Duration: 10 * time.Minute}
+ // DefaultUpdaterInterval is the default value for the UpdaterInterval field in the VPA configuration.
+ DefaultUpdaterInterval = metav1.Duration{Duration: time.Minute}
+ // DefaultRecommenderInterval is the default value for the RecommenderInterval field in the VPA configuration.
+ DefaultRecommenderInterval = metav1.Duration{Duration: time.Minute}
+)
+
+// KubernetesConfig contains common configuration fields for the control plane components.
+type KubernetesConfig struct {
+ // FeatureGates contains information about enabled feature gates.
+ // +optional
+ FeatureGates map[string]bool `json:"featureGates,omitempty" protobuf:"bytes,1,rep,name=featureGates"`
+}
+
+// KubeAPIServerConfig contains configuration settings for the kube-apiserver.
+type KubeAPIServerConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // AdmissionPlugins contains the list of user-defined admission plugins (additional to those managed by Gardener), and, if desired, the corresponding
+ // configuration.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ AdmissionPlugins []AdmissionPlugin `json:"admissionPlugins,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=admissionPlugins"`
+ // APIAudiences are the identifiers of the API. The service account token authenticator will
+ // validate that tokens used against the API are bound to at least one of these audiences.
+ // Defaults to ["kubernetes"].
+ // +optional
+ APIAudiences []string `json:"apiAudiences,omitempty" protobuf:"bytes,3,rep,name=apiAudiences"`
+ // AuditConfig contains configuration settings for the audit of the kube-apiserver.
+ // +optional
+ AuditConfig *AuditConfig `json:"auditConfig,omitempty" protobuf:"bytes,4,opt,name=auditConfig"`
+ // EnableBasicAuthentication defines whether basic authentication should be enabled for this cluster or not.
+ // +optional
+ EnableBasicAuthentication *bool `json:"enableBasicAuthentication,omitempty" protobuf:"varint,5,opt,name=enableBasicAuthentication"`
+ // OIDCConfig contains configuration settings for the OIDC provider.
+ // +optional
+ OIDCConfig *OIDCConfig `json:"oidcConfig,omitempty" protobuf:"bytes,6,opt,name=oidcConfig"`
+ // RuntimeConfig contains information about enabled or disabled APIs.
+ // +optional
+ RuntimeConfig map[string]bool `json:"runtimeConfig,omitempty" protobuf:"bytes,7,rep,name=runtimeConfig"`
+ // ServiceAccountConfig contains configuration settings for the service account handling
+ // of the kube-apiserver.
+ // +optional
+ ServiceAccountConfig *ServiceAccountConfig `json:"serviceAccountConfig,omitempty" protobuf:"bytes,8,opt,name=serviceAccountConfig"`
+ // WatchCacheSizes contains configuration of the API server's watch cache sizes.
+ // Configuring these flags might be useful for large-scale Shoot clusters with a lot of parallel update requests
+ // and a lot of watching controllers (e.g. large shooted Seed clusters). When the API server's watch cache's
+ // capacity is too small to cope with the amount of update requests and watchers for a particular resource, it
+ // might happen that controller watches are permanently stopped with `too old resource version` errors.
+ // Starting from kubernetes v1.19, the API server's watch cache size is adapted dynamically and setting the watch
+ // cache size flags will have no effect, except when setting it to 0 (which disables the watch cache).
+ // +optional
+ WatchCacheSizes *WatchCacheSizes `json:"watchCacheSizes,omitempty" protobuf:"bytes,9,opt,name=watchCacheSizes"`
+ // Requests contains configuration for request-specific settings for the kube-apiserver.
+ // +optional
+ Requests *KubeAPIServerRequests `json:"requests,omitempty" protobuf:"bytes,10,opt,name=requests"`
+}
+
+// KubeAPIServerRequests contains configuration for request-specific settings for the kube-apiserver.
+type KubeAPIServerRequests struct {
+ // MaxNonMutatingInflight is the maximum number of non-mutating requests in flight at a given time. When the server
+ // exceeds this, it rejects requests.
+ // +optional
+ MaxNonMutatingInflight *int32 `json:"maxNonMutatingInflight,omitempty" protobuf:"bytes,1,name=maxNonMutatingInflight"`
+ // MaxMutatingInflight is the maximum number of mutating requests in flight at a given time. When the server
+ // exceeds this, it rejects requests.
+ // +optional
+ MaxMutatingInflight *int32 `json:"maxMutatingInflight,omitempty" protobuf:"bytes,2,name=maxMutatingInflight"`
+}
+
+// ServiceAccountConfig is the kube-apiserver configuration for service accounts.
+type ServiceAccountConfig struct {
+ // Issuer is the identifier of the service account token issuer. The issuer will assert this
+ // identifier in the "iss" claim of issued tokens. This value is a string or URI.
+ // Defaults to the URI of the API server.
+ // +optional
+ Issuer *string `json:"issuer,omitempty" protobuf:"bytes,1,opt,name=issuer"`
+ // SigningKeySecret is a reference to a secret that contains an optional private key of the
+ // service account token issuer. The issuer will sign issued ID tokens with this private key.
+ // Only useful if service account tokens are also issued by another external system.
+ // +optional
+ SigningKeySecret *corev1.LocalObjectReference `json:"signingKeySecretName,omitempty" protobuf:"bytes,2,opt,name=signingKeySecretName"`
+}
+
+// AuditConfig contains settings for the audit of the kube-apiserver.
+type AuditConfig struct {
+ // AuditPolicy contains configuration settings for audit policy of the kube-apiserver.
+ // +optional
+ AuditPolicy *AuditPolicy `json:"auditPolicy,omitempty" protobuf:"bytes,1,opt,name=auditPolicy"`
+}
+
+// AuditPolicy contains the audit policy for the kube-apiserver.
+type AuditPolicy struct {
+ // ConfigMapRef is a reference to a ConfigMap object in the same namespace,
+ // which contains the audit policy for the kube-apiserver.
+ // +optional
+ ConfigMapRef *corev1.ObjectReference `json:"configMapRef,omitempty" protobuf:"bytes,1,opt,name=configMapRef"`
+}
+
+// OIDCConfig contains configuration settings for the OIDC provider.
+// Note: Descriptions were taken from the Kubernetes documentation.
+type OIDCConfig struct {
+ // If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.
+ // +optional
+ CABundle *string `json:"caBundle,omitempty" protobuf:"bytes,1,opt,name=caBundle"`
+ // ClientAuthentication can optionally contain client configuration used for kubeconfig generation.
+ // +optional
+ ClientAuthentication *OpenIDConnectClientAuthentication `json:"clientAuthentication,omitempty" protobuf:"bytes,2,opt,name=clientAuthentication"`
+ // The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.
+ // +optional
+ ClientID *string `json:"clientID,omitempty" protobuf:"bytes,3,opt,name=clientID"`
+ // If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details.
+ // +optional
+ GroupsClaim *string `json:"groupsClaim,omitempty" protobuf:"bytes,4,opt,name=groupsClaim"`
+ // If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.
+ // +optional
+ GroupsPrefix *string `json:"groupsPrefix,omitempty" protobuf:"bytes,5,opt,name=groupsPrefix"`
+ // The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).
+ // +optional
+ IssuerURL *string `json:"issuerURL,omitempty" protobuf:"bytes,6,opt,name=issuerURL"`
+ // ATTENTION: Only meaningful for Kubernetes >= 1.11
+ // key=value pairs that describe a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value.
+ // +optional
+ RequiredClaims map[string]string `json:"requiredClaims,omitempty" protobuf:"bytes,7,rep,name=requiredClaims"`
+ // List of allowed JOSE asymmetric signing algorithms. JWTs with an 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1
+ // +optional
+ SigningAlgs []string `json:"signingAlgs,omitempty" protobuf:"bytes,8,rep,name=signingAlgs"`
+ // The OpenID claim to use as the user name. Note that claims other than the default ('sub') are not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub")
+ // +optional
+ UsernameClaim *string `json:"usernameClaim,omitempty" protobuf:"bytes,9,opt,name=usernameClaim"`
+ // If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'.
+ // +optional
+ UsernamePrefix *string `json:"usernamePrefix,omitempty" protobuf:"bytes,10,opt,name=usernamePrefix"`
+}
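+
+// Example (editor's illustrative sketch; the issuer URL, client ID and claim
+// names are hypothetical): configuring the kube-apiserver to accept tokens from
+// an external OIDC provider:
+//
+//    issuerURL := "https://idp.example.com"
+//    clientID := "kubernetes"
+//    usernameClaim := "email"
+//    groupsClaim := "groups"
+//    oidc := OIDCConfig{
+//        IssuerURL:     &issuerURL,
+//        ClientID:      &clientID,
+//        UsernameClaim: &usernameClaim,
+//        GroupsClaim:   &groupsClaim,
+//    }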
+
+// OpenIDConnectClientAuthentication contains configuration for OIDC clients.
+type OpenIDConnectClientAuthentication struct {
+ // Extra configuration added to kubeconfig's auth-provider.
+ // Must not be any of idp-issuer-url, client-id, client-secret, idp-certificate-authority, idp-certificate-authority-data, id-token or refresh-token
+ // +optional
+ ExtraConfig map[string]string `json:"extraConfig,omitempty" protobuf:"bytes,1,rep,name=extraConfig"`
+ // The client Secret for the OpenID Connect client.
+ // +optional
+ Secret *string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+}
+
+// AdmissionPlugin contains information about a specific admission plugin and its corresponding configuration.
+type AdmissionPlugin struct {
+ // Name is the name of the plugin.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Config is the configuration of the plugin.
+ // +optional
+ Config *runtime.RawExtension `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
+}
+
+// WatchCacheSizes contains configuration of the API server's watch cache sizes.
+type WatchCacheSizes struct {
+ // Default configures the default watch cache size of the kube-apiserver
+ // (flag `--default-watch-cache-size`, defaults to 100).
+ // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+ // +optional
+ Default *int32 `json:"default,omitempty" protobuf:"varint,1,opt,name=default"`
+ // Resources configures the watch cache size of the kube-apiserver per resource
+ // (flag `--watch-cache-sizes`).
+ // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+ // +optional
+ Resources []ResourceWatchCacheSize `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
+}
+
+// ResourceWatchCacheSize contains configuration of the API server's watch cache size for one specific resource.
+type ResourceWatchCacheSize struct {
+ // APIGroup is the API group of the resource for which the watch cache size should be configured.
+ // An unset value is used to specify the legacy core API (e.g. for `secrets`).
+ // +optional
+ APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"`
+ // Resource is the name of the resource for which the watch cache size should be configured
+ // (in lowercase plural form, e.g. `secrets`).
+ Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
+ // CacheSize specifies the watch cache size that should be configured for the specified resource.
+ CacheSize int32 `json:"size" protobuf:"varint,3,opt,name=size"`
+}
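+
+// Example (editor's illustrative sketch; the sizes are arbitrary): raising the
+// default watch cache size and the size for secrets (a core API resource, hence
+// no APIGroup is set):
+//
+//    defaultSize := int32(500)
+//    watchCacheSizes := WatchCacheSizes{
+//        Default:   &defaultSize,
+//        Resources: []ResourceWatchCacheSize{{Resource: "secrets", CacheSize: 1000}},
+//    }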
+
+// KubeControllerManagerConfig contains configuration settings for the kube-controller-manager.
+type KubeControllerManagerConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.
+ // +optional
+ HorizontalPodAutoscalerConfig *HorizontalPodAutoscalerConfig `json:"horizontalPodAutoscaler,omitempty" protobuf:"bytes,2,opt,name=horizontalPodAutoscaler"`
+ // NodeCIDRMaskSize defines the mask size for the node CIDR in the cluster (default: 24).
+ // +optional
+ NodeCIDRMaskSize *int32 `json:"nodeCIDRMaskSize,omitempty" protobuf:"varint,3,opt,name=nodeCIDRMaskSize"`
+ // PodEvictionTimeout defines the grace period for deleting pods on failed nodes. Defaults to 2m.
+ // +optional
+ PodEvictionTimeout *metav1.Duration `json:"podEvictionTimeout,omitempty" protobuf:"bytes,4,opt,name=podEvictionTimeout"`
+}
+
+// HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.
+// Note: Descriptions were taken from the Kubernetes documentation.
+type HorizontalPodAutoscalerConfig struct {
+ // The period after which a ready pod transition is considered to be the first.
+ // +optional
+ CPUInitializationPeriod *metav1.Duration `json:"cpuInitializationPeriod,omitempty" protobuf:"bytes,1,opt,name=cpuInitializationPeriod"`
+ // The period since last downscale, before another downscale can be performed in horizontal pod autoscaler.
+ // +optional
+ DownscaleDelay *metav1.Duration `json:"downscaleDelay,omitempty" protobuf:"bytes,2,opt,name=downscaleDelay"`
+ // The configurable window at which the controller will choose the highest recommendation for autoscaling.
+ // +optional
+ DownscaleStabilization *metav1.Duration `json:"downscaleStabilization,omitempty" protobuf:"bytes,3,opt,name=downscaleStabilization"`
+ // The configurable period at which the horizontal pod autoscaler considers a Pod “not yet ready” given that it’s unready and it has transitioned to unready during that time.
+ // +optional
+ InitialReadinessDelay *metav1.Duration `json:"initialReadinessDelay,omitempty" protobuf:"bytes,4,opt,name=initialReadinessDelay"`
+ // The period for syncing the number of pods in horizontal pod autoscaler.
+ // +optional
+ SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty" protobuf:"bytes,5,opt,name=syncPeriod"`
+ // The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.
+ // +optional
+ Tolerance *float64 `json:"tolerance,omitempty" protobuf:"fixed64,6,opt,name=tolerance"`
+ // The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.
+ // +optional
+ UpscaleDelay *metav1.Duration `json:"upscaleDelay,omitempty" protobuf:"bytes,7,opt,name=upscaleDelay"`
+}
+
+const (
+ // DefaultHPADownscaleDelay is a constant for the default HPA downscale delay for a Shoot cluster.
+ DefaultHPADownscaleDelay = 15 * time.Minute
+ // DefaultHPASyncPeriod is a constant for the default HPA sync period for a Shoot cluster.
+ DefaultHPASyncPeriod = 30 * time.Second
+ // DefaultHPATolerance is a constant for the default HPA tolerance for a Shoot cluster.
+ DefaultHPATolerance = 0.1
+ // DefaultHPAUpscaleDelay is a constant for the default HPA upscale delay for a Shoot cluster.
+ DefaultHPAUpscaleDelay = 1 * time.Minute
+ // DefaultDownscaleStabilization is a constant for the default HPA downscale stabilization window for a Shoot cluster.
+ DefaultDownscaleStabilization = 5 * time.Minute
+ // DefaultInitialReadinessDelay is a constant for the default HPA initial readiness delay for a Shoot cluster.
+ DefaultInitialReadinessDelay = 30 * time.Second
+ // DefaultCPUInitializationPeriod is a constant for the default value of the CPUInitializationPeriod for a Shoot cluster.
+ DefaultCPUInitializationPeriod = 5 * time.Minute
+)
+
+// KubeSchedulerConfig contains configuration settings for the kube-scheduler.
+type KubeSchedulerConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // KubeMaxPDVols allows to configure the `KUBE_MAX_PD_VOLS` environment variable for the kube-scheduler.
+ // Please find more information here: https://kubernetes.io/docs/concepts/storage/storage-limits/#custom-limits
+ // Note that using this field is considered alpha-/experimental-level and is at your own risk. You should be aware
+ // of all the side-effects and consequences when changing it.
+ // +optional
+ KubeMaxPDVols *string `json:"kubeMaxPDVols,omitempty" protobuf:"bytes,2,opt,name=kubeMaxPDVols"`
+}
+
+// KubeProxyConfig contains configuration settings for the kube-proxy.
+type KubeProxyConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // Mode specifies which proxy mode to use.
+ // Defaults to IPTables.
+ // +optional
+ Mode *ProxyMode `json:"mode,omitempty" protobuf:"bytes,2,opt,name=mode,casttype=ProxyMode"`
+}
+
+// ProxyMode is available on the Linux platform: 'userspace' (older, going to be EOL), 'iptables'
+// (newer, faster), 'ipvs' (newest, better in performance and scalability).
+// As of now only 'iptables' and 'ipvs' are supported by Gardener.
+// On Linux, if the iptables proxy is selected but the system's kernel or iptables versions are
+// insufficient, kube-proxy always falls back to the userspace proxy. IPVS mode will be enabled when
+// the proxy mode is set to 'ipvs', and the fallback path is first iptables and then userspace.
+type ProxyMode string
+
+const (
+ // ProxyModeIPTables uses iptables as proxy implementation.
+ ProxyModeIPTables ProxyMode = "IPTables"
+ // ProxyModeIPVS uses ipvs as proxy implementation.
+ ProxyModeIPVS ProxyMode = "IPVS"
+)
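+
+// Example (editor's illustrative sketch): selecting the IPVS proxy mode for the
+// Shoot's kube-proxy:
+//
+//    mode := ProxyModeIPVS
+//    kubeProxy := KubeProxyConfig{Mode: &mode}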
+
+// KubeletConfig contains configuration settings for the kubelet.
+type KubeletConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // CPUCFSQuota allows you to disable/enable CPU throttling for Pods.
+ // +optional
+ CPUCFSQuota *bool `json:"cpuCFSQuota,omitempty" protobuf:"varint,2,opt,name=cpuCFSQuota"`
+ // CPUManagerPolicy allows to set alternative CPU management policies (default: none).
+ // +optional
+ CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" protobuf:"bytes,3,opt,name=cpuManagerPolicy"`
+ // EvictionHard describes a set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a Pod eviction.
+ // +optional
+ // Default:
+ // memory.available: "100Mi/1Gi/5%"
+ // nodefs.available: "5%"
+ // nodefs.inodesFree: "5%"
+ // imagefs.available: "5%"
+ // imagefs.inodesFree: "5%"
+ EvictionHard *KubeletConfigEviction `json:"evictionHard,omitempty" protobuf:"bytes,4,opt,name=evictionHard"`
+ // EvictionMaxPodGracePeriod describes the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ // +optional
+ // Default: 90
+ EvictionMaxPodGracePeriod *int32 `json:"evictionMaxPodGracePeriod,omitempty" protobuf:"varint,5,opt,name=evictionMaxPodGracePeriod"`
+ // EvictionMinimumReclaim configures the amount of resources below the configured eviction threshold that the kubelet attempts to reclaim whenever the kubelet observes resource pressure.
+ // +optional
+ // Default: 0 for each resource
+ EvictionMinimumReclaim *KubeletConfigEvictionMinimumReclaim `json:"evictionMinimumReclaim,omitempty" protobuf:"bytes,6,opt,name=evictionMinimumReclaim"`
+ // EvictionPressureTransitionPeriod is the duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.
+ // +optional
+ // Default: 4m0s
+ EvictionPressureTransitionPeriod *metav1.Duration `json:"evictionPressureTransitionPeriod,omitempty" protobuf:"bytes,7,opt,name=evictionPressureTransitionPeriod"`
+ // EvictionSoft describes a set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a Pod eviction.
+ // +optional
+ // Default:
+ // memory.available: "200Mi/1.5Gi/10%"
+ // nodefs.available: "10%"
+ // nodefs.inodesFree: "10%"
+ // imagefs.available: "10%"
+ // imagefs.inodesFree: "10%"
+ EvictionSoft *KubeletConfigEviction `json:"evictionSoft,omitempty" protobuf:"bytes,8,opt,name=evictionSoft"`
+ // EvictionSoftGracePeriod describes a set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a Pod eviction.
+ // +optional
+ // Default:
+ // memory.available: 1m30s
+ // nodefs.available: 1m30s
+ // nodefs.inodesFree: 1m30s
+ // imagefs.available: 1m30s
+ // imagefs.inodesFree: 1m30s
+ EvictionSoftGracePeriod *KubeletConfigEvictionSoftGracePeriod `json:"evictionSoftGracePeriod,omitempty" protobuf:"bytes,9,opt,name=evictionSoftGracePeriod"`
+ // MaxPods is the maximum number of Pods that are allowed by the Kubelet.
+ // +optional
+ // Default: 110
+ MaxPods *int32 `json:"maxPods,omitempty" protobuf:"varint,10,opt,name=maxPods"`
+ // PodPIDsLimit is the maximum number of process IDs per pod allowed by the kubelet.
+ // +optional
+ PodPIDsLimit *int64 `json:"podPidsLimit,omitempty" protobuf:"varint,11,opt,name=podPidsLimit"`
+ // ImagePullProgressDeadline describes the time limit after which image pulling is cancelled if no pulling progress has been made.
+ // +optional
+ // Default: 1m
+ ImagePullProgressDeadline *metav1.Duration `json:"imagePullProgressDeadline,omitempty" protobuf:"bytes,12,opt,name=imagePullProgressDeadline"`
+ // FailSwapOn makes the Kubelet fail to start if swap is enabled on the node (default: true).
+ // +optional
+ FailSwapOn *bool `json:"failSwapOn,omitempty" protobuf:"varint,13,opt,name=failSwapOn"`
+ // KubeReserved is the configuration for resources reserved for kubernetes node components (mainly kubelet and container runtime).
+ // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied.
+ // +optional
+ // Default: cpu=80m,memory=1Gi,pid=20k
+ KubeReserved *KubeletConfigReserved `json:"kubeReserved,omitempty" protobuf:"bytes,14,opt,name=kubeReserved"`
+ // SystemReserved is the configuration for resources reserved for system processes not managed by kubernetes (e.g. journald).
+ // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied.
+ // +optional
+ SystemReserved *KubeletConfigReserved `json:"systemReserved,omitempty" protobuf:"bytes,15,opt,name=systemReserved"`
+}
+
+// KubeletConfigEviction contains kubelet eviction thresholds supporting either a resource.Quantity or a percentage based value.
+type KubeletConfigEviction struct {
+ // MemoryAvailable is the threshold for the free memory on the host server.
+ // +optional
+ MemoryAvailable *string `json:"memoryAvailable,omitempty" protobuf:"bytes,1,opt,name=memoryAvailable"`
+ // ImageFSAvailable is the threshold for the free disk space in the imagefs filesystem (docker images and container writable layers).
+ // +optional
+ ImageFSAvailable *string `json:"imageFSAvailable,omitempty" protobuf:"bytes,2,opt,name=imageFSAvailable"`
+ // ImageFSInodesFree is the threshold for the available inodes in the imagefs filesystem.
+ // +optional
+ ImageFSInodesFree *string `json:"imageFSInodesFree,omitempty" protobuf:"bytes,3,opt,name=imageFSInodesFree"`
+ // NodeFSAvailable is the threshold for the free disk space in the nodefs filesystem (docker volumes, logs, etc).
+ // +optional
+ NodeFSAvailable *string `json:"nodeFSAvailable,omitempty" protobuf:"bytes,4,opt,name=nodeFSAvailable"`
+ // NodeFSInodesFree is the threshold for the available inodes in the nodefs filesystem.
+ // +optional
+ NodeFSInodesFree *string `json:"nodeFSInodesFree,omitempty" protobuf:"bytes,5,opt,name=nodeFSInodesFree"`
+}
+
+// KubeletConfigEvictionMinimumReclaim contains configuration for the kubelet eviction minimum reclaim.
+type KubeletConfigEvictionMinimumReclaim struct {
+ // MemoryAvailable is the threshold for the memory reclaim on the host server.
+ // +optional
+ MemoryAvailable *resource.Quantity `json:"memoryAvailable,omitempty" protobuf:"bytes,1,opt,name=memoryAvailable"`
+ // ImageFSAvailable is the threshold for the disk space reclaim in the imagefs filesystem (docker images and container writable layers).
+ // +optional
+ ImageFSAvailable *resource.Quantity `json:"imageFSAvailable,omitempty" protobuf:"bytes,2,opt,name=imageFSAvailable"`
+ // ImageFSInodesFree is the threshold for the inodes reclaim in the imagefs filesystem.
+ // +optional
+ ImageFSInodesFree *resource.Quantity `json:"imageFSInodesFree,omitempty" protobuf:"bytes,3,opt,name=imageFSInodesFree"`
+ // NodeFSAvailable is the threshold for the disk space reclaim in the nodefs filesystem (docker volumes, logs, etc).
+ // +optional
+ NodeFSAvailable *resource.Quantity `json:"nodeFSAvailable,omitempty" protobuf:"bytes,4,opt,name=nodeFSAvailable"`
+ // NodeFSInodesFree is the threshold for the inodes reclaim in the nodefs filesystem.
+ // +optional
+ NodeFSInodesFree *resource.Quantity `json:"nodeFSInodesFree,omitempty" protobuf:"bytes,5,opt,name=nodeFSInodesFree"`
+}
+
+// KubeletConfigEvictionSoftGracePeriod contains grace periods for kubelet eviction thresholds.
+type KubeletConfigEvictionSoftGracePeriod struct {
+ // MemoryAvailable is the grace period for the MemoryAvailable eviction threshold.
+ // +optional
+ MemoryAvailable *metav1.Duration `json:"memoryAvailable,omitempty" protobuf:"bytes,1,opt,name=memoryAvailable"`
+ // ImageFSAvailable is the grace period for the ImageFSAvailable eviction threshold.
+ // +optional
+ ImageFSAvailable *metav1.Duration `json:"imageFSAvailable,omitempty" protobuf:"bytes,2,opt,name=imageFSAvailable"`
+ // ImageFSInodesFree is the grace period for the ImageFSInodesFree eviction threshold.
+ // +optional
+ ImageFSInodesFree *metav1.Duration `json:"imageFSInodesFree,omitempty" protobuf:"bytes,3,opt,name=imageFSInodesFree"`
+ // NodeFSAvailable is the grace period for the NodeFSAvailable eviction threshold.
+ // +optional
+ NodeFSAvailable *metav1.Duration `json:"nodeFSAvailable,omitempty" protobuf:"bytes,4,opt,name=nodeFSAvailable"`
+ // NodeFSInodesFree is the grace period for the NodeFSInodesFree eviction threshold.
+ // +optional
+ NodeFSInodesFree *metav1.Duration `json:"nodeFSInodesFree,omitempty" protobuf:"bytes,5,opt,name=nodeFSInodesFree"`
+}
+
+// KubeletConfigReserved contains reserved resources for daemons
+type KubeletConfigReserved struct {
+ // CPU is the reserved cpu.
+ // +optional
+ CPU *resource.Quantity `json:"cpu,omitempty" protobuf:"bytes,1,opt,name=cpu"`
+ // Memory is the reserved memory.
+ // +optional
+ Memory *resource.Quantity `json:"memory,omitempty" protobuf:"bytes,2,opt,name=memory"`
+ // EphemeralStorage is the reserved ephemeral-storage.
+ // +optional
+ EphemeralStorage *resource.Quantity `json:"ephemeralStorage,omitempty" protobuf:"bytes,3,opt,name=ephemeralStorage"`
+ // PID is the reserved process-ids.
+ // To reserve PID, the SupportNodePidsLimit feature gate must be enabled in Kubernetes versions < 1.15.
+ // +optional
+ PID *resource.Quantity `json:"pid,omitempty" protobuf:"bytes,4,opt,name=pid"`
+}
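+
+// Example (editor's illustrative sketch; the quantities mirror the documented
+// kubeReserved default above): reserving resources for the kubelet and the
+// container runtime:
+//
+//    cpu := resource.MustParse("80m")
+//    memory := resource.MustParse("1Gi")
+//    pid := resource.MustParse("20k")
+//    kubeReserved := KubeletConfigReserved{CPU: &cpu, Memory: &memory, PID: &pid}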
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Networking relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Networking defines networking parameters for the shoot cluster.
+type Networking struct {
+ // Type identifies the type of the networking plugin.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // ProviderConfig is the configuration passed to network resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // Pods is the CIDR of the pod network.
+ // +optional
+ Pods *string `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"`
+ // Nodes is the CIDR of the entire node network.
+ // +optional
+ Nodes *string `json:"nodes,omitempty" protobuf:"bytes,4,opt,name=nodes"`
+ // Services is the CIDR of the service network.
+ // +optional
+ Services *string `json:"services,omitempty" protobuf:"bytes,5,opt,name=services"`
+}
+
+const (
+ // DefaultPodNetworkCIDR is a constant for the default pod network CIDR of a Shoot cluster.
+ DefaultPodNetworkCIDR = "100.96.0.0/11"
+ // DefaultServiceNetworkCIDR is a constant for the default service network CIDR of a Shoot cluster.
+ DefaultServiceNetworkCIDR = "100.64.0.0/13"
+)
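+
+// Example (editor's illustrative sketch; "calico" is one possible networking
+// type and the node CIDR is an arbitrary value, while pods and services reuse
+// the defaults above):
+//
+//    pods := DefaultPodNetworkCIDR
+//    services := DefaultServiceNetworkCIDR
+//    nodes := "10.250.0.0/16"
+//    networking := Networking{Type: "calico", Pods: &pods, Nodes: &nodes, Services: &services}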
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Maintenance relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+const (
+ // MaintenanceTimeWindowDurationMinimum is the minimum duration for a maintenance time window.
+ MaintenanceTimeWindowDurationMinimum = 30 * time.Minute
+ // MaintenanceTimeWindowDurationMaximum is the maximum duration for a maintenance time window.
+ MaintenanceTimeWindowDurationMaximum = 6 * time.Hour
+)
+
+// Maintenance contains information about the time window for maintenance operations and which
+// operations should be performed.
+type Maintenance struct {
+ // AutoUpdate contains information about which constraints should be automatically updated.
+ // +optional
+ AutoUpdate *MaintenanceAutoUpdate `json:"autoUpdate,omitempty" protobuf:"bytes,1,opt,name=autoUpdate"`
+ // TimeWindow contains information about the time window for maintenance operations.
+ // +optional
+ TimeWindow *MaintenanceTimeWindow `json:"timeWindow,omitempty" protobuf:"bytes,2,opt,name=timeWindow"`
+ // ConfineSpecUpdateRollout prevents changes/updates to the shoot specification from being rolled out immediately.
+ // Instead, they are rolled out during the shoot's maintenance time window. There is one exception that will trigger
+ // an immediate roll out: changes to the Spec.Hibernation.Enabled field.
+ // +optional
+ ConfineSpecUpdateRollout *bool `json:"confineSpecUpdateRollout,omitempty" protobuf:"varint,3,opt,name=confineSpecUpdateRollout"`
+}
+
+// MaintenanceAutoUpdate contains information about which constraints should be automatically updated.
+type MaintenanceAutoUpdate struct {
+ // KubernetesVersion indicates whether the patch Kubernetes version may be automatically updated (default: true).
+ KubernetesVersion bool `json:"kubernetesVersion" protobuf:"varint,1,opt,name=kubernetesVersion"`
+ // MachineImageVersion indicates whether the machine image version may be automatically updated (default: true).
+ MachineImageVersion bool `json:"machineImageVersion" protobuf:"varint,2,opt,name=machineImageVersion"`
+}
+
+// MaintenanceTimeWindow contains information about the time window for maintenance operations.
+type MaintenanceTimeWindow struct {
+ // Begin is the beginning of the time window in the format HHMMSS+ZONE, e.g. "220000+0100".
+ // If not present, a random value will be computed.
+ Begin string `json:"begin" protobuf:"bytes,1,opt,name=begin"`
+ // End is the end of the time window in the format HHMMSS+ZONE, e.g. "220000+0100".
+ // If not present, the value will be computed based on the "Begin" value.
+ End string `json:"end" protobuf:"bytes,2,opt,name=end"`
+}
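+
+// Example (editor's illustrative sketch; the time window is an arbitrary one-hour
+// window that satisfies the minimum duration above):
+//
+//    maintenance := Maintenance{
+//        AutoUpdate: &MaintenanceAutoUpdate{KubernetesVersion: true, MachineImageVersion: true},
+//        TimeWindow: &MaintenanceTimeWindow{Begin: "220000+0100", End: "230000+0100"},
+//    }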
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Monitoring relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Monitoring contains information about the monitoring configuration for the shoot.
+type Monitoring struct {
+ // Alerting contains information about the alerting configuration for the shoot cluster.
+ // +optional
+ Alerting *Alerting `json:"alerting,omitempty" protobuf:"bytes,1,opt,name=alerting"`
+}
+
+// Alerting contains information about how alerting will be done (i.e. who will receive alerts and how).
+type Alerting struct {
+ // EmailReceivers is a list of recipients for alerts.
+ // +optional
+ EmailReceivers []string `json:"emailReceivers,omitempty" protobuf:"bytes,1,rep,name=emailReceivers"`
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Provider relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Provider contains provider-specific information that is handed over to the provider-specific
+// extension controller.
+type Provider struct {
+ // Type is the type of the provider.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // ControlPlaneConfig contains the provider-specific control plane config blob. Please look up the concrete
+ // definition in the documentation of your provider extension.
+ // +optional
+ ControlPlaneConfig *runtime.RawExtension `json:"controlPlaneConfig,omitempty" protobuf:"bytes,2,opt,name=controlPlaneConfig"`
+ // InfrastructureConfig contains the provider-specific infrastructure config blob. Please look up the concrete
+ // definition in the documentation of your provider extension.
+ // +optional
+ InfrastructureConfig *runtime.RawExtension `json:"infrastructureConfig,omitempty" protobuf:"bytes,3,opt,name=infrastructureConfig"`
+ // Workers is a list of worker groups.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Workers []Worker `json:"workers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,4,rep,name=workers"`
+}
+
+// Worker is the base definition of a worker group.
+type Worker struct {
+ // Annotations is a map of key/value pairs for annotations for all the `Node` objects in this worker pool.
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,1,rep,name=annotations"`
+ // CABundle is a certificate bundle which will be installed onto every machine of this worker pool.
+ // +optional
+ CABundle *string `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"`
+ // CRI contains configurations of CRI support of every machine in the worker pool
+ // +optional
+ CRI *CRI `json:"cri,omitempty" protobuf:"bytes,3,opt,name=cri"`
+ // Kubernetes contains configuration for Kubernetes components related to this worker pool.
+ // +optional
+ Kubernetes *WorkerKubernetes `json:"kubernetes,omitempty" protobuf:"bytes,4,opt,name=kubernetes"`
+ // Labels is a map of key/value pairs for labels for all the `Node` objects in this worker pool.
+ // +optional
+ Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,5,rep,name=labels"`
+ // Name is the name of the worker group.
+ Name string `json:"name" protobuf:"bytes,6,opt,name=name"`
+ // Machine contains information about the machine type and image.
+ Machine Machine `json:"machine" protobuf:"bytes,7,opt,name=machine"`
+ // Maximum is the maximum number of VMs to create.
+ Maximum int32 `json:"maximum" protobuf:"varint,8,opt,name=maximum"`
+ // Minimum is the minimum number of VMs to create.
+ Minimum int32 `json:"minimum" protobuf:"varint,9,opt,name=minimum"`
+ // MaxSurge is the maximum number of VMs that are created during an update.
+ // +optional
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,10,opt,name=maxSurge"`
+ // MaxUnavailable is the maximum number of VMs that can be unavailable during an update.
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,11,opt,name=maxUnavailable"`
+ // ProviderConfig is the provider-specific configuration for this worker pool.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,12,opt,name=providerConfig"`
+ // Taints is a list of taints for all the `Node` objects in this worker pool.
+ // +optional
+ Taints []corev1.Taint `json:"taints,omitempty" protobuf:"bytes,13,rep,name=taints"`
+ // Volume contains information about the volume type and size.
+ // +optional
+ Volume *Volume `json:"volume,omitempty" protobuf:"bytes,14,opt,name=volume"`
+ // DataVolumes contains a list of additional worker volumes.
+ // +optional
+ DataVolumes []DataVolume `json:"dataVolumes,omitempty" protobuf:"bytes,15,rep,name=dataVolumes"`
+ // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state.
+ // +optional
+ KubeletDataVolumeName *string `json:"kubeletDataVolumeName,omitempty" protobuf:"bytes,16,opt,name=kubeletDataVolumeName"`
+ // Zones is a list of availability zones that are used to evenly distribute this worker pool. Optional
+ // as not every provider may support availability zones.
+ // +optional
+ Zones []string `json:"zones,omitempty" protobuf:"bytes,17,rep,name=zones"`
+ // SystemComponents contains configuration for system components related to this worker pool
+ // +optional
+ SystemComponents *WorkerSystemComponents `json:"systemComponents,omitempty" protobuf:"bytes,18,opt,name=systemComponents"`
+ // MachineControllerManagerSettings contains configurations for different worker-pools, e.g. MachineDrainTimeout, MachineHealthTimeout.
+ // +optional
+ MachineControllerManagerSettings *MachineControllerManagerSettings `json:"machineControllerManager,omitempty" protobuf:"bytes,19,opt,name=machineControllerManager"`
+}
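+
+// Example (editor's illustrative sketch; the machine type, image name and zone
+// are provider-specific placeholders): a minimal worker pool definition:
+//
+//    worker := Worker{
+//        Name:    "worker-pool-1",
+//        Machine: Machine{Type: "m5.large", Image: &ShootMachineImage{Name: "gardenlinux"}},
+//        Minimum: 2,
+//        Maximum: 4,
+//        Zones:   []string{"eu-central-1a"},
+//    }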
+
+// MachineControllerManagerSettings contains configurations for different worker-pools, e.g. MachineDrainTimeout, MachineHealthTimeout.
+type MachineControllerManagerSettings struct {
+ // MachineDrainTimeout is the period after which a machine is forcefully deleted.
+ // +optional
+ MachineDrainTimeout *metav1.Duration `json:"machineDrainTimeout,omitempty" protobuf:"bytes,1,name=machineDrainTimeout"`
+ // MachineHealthTimeout is the period after which a machine is declared failed.
+ // +optional
+ MachineHealthTimeout *metav1.Duration `json:"machineHealthTimeout,omitempty" protobuf:"bytes,2,name=machineHealthTimeout"`
+ // MachineCreationTimeout is the period after which creation of the machine is declared failed.
+ // +optional
+ MachineCreationTimeout *metav1.Duration `json:"machineCreationTimeout,omitempty" protobuf:"bytes,3,name=machineCreationTimeout"`
+ // MaxEvictRetries is the number of eviction retries on a pod after which drain is declared failed, and forceful deletion is triggered.
+ // +optional
+ MaxEvictRetries *int32 `json:"maxEvictRetries,omitempty" protobuf:"bytes,4,name=maxEvictRetries"`
+ // NodeConditions are the set of conditions that, if they stay true for the period of MachineHealthTimeout, cause the machine to be declared failed.
+ // +optional
+ NodeConditions []string `json:"nodeConditions,omitempty" protobuf:"bytes,5,name=nodeConditions"`
+}
+
+// WorkerSystemComponents contains configuration for system components related to this worker pool
+type WorkerSystemComponents struct {
+ // Allow determines whether the pool should be allowed to host system components or not (defaults to true)
+ Allow bool `json:"allow" protobuf:"bytes,1,name=allow"`
+}
+
+// WorkerKubernetes contains configuration for Kubernetes components related to this worker pool.
+type WorkerKubernetes struct {
+ // Kubelet contains configuration settings for all kubelets of this worker pool.
+ // +optional
+ Kubelet *KubeletConfig `json:"kubelet,omitempty" protobuf:"bytes,1,opt,name=kubelet"`
+}
+
+// Machine contains information about the machine type and image.
+type Machine struct {
+ // Type is the machine type of the worker group.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // Image holds information about the machine image to use for all nodes of this pool. It will default to the
+ // latest version of the first image stated in the referenced CloudProfile if no value has been provided.
+ // +optional
+ Image *ShootMachineImage `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
+}
+
+// ShootMachineImage defines the name and the version of the shoot's machine image in any environment. Has to be
+// defined in the respective CloudProfile.
+type ShootMachineImage struct {
+ // Name is the name of the image.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // ProviderConfig is the shoot's individual configuration passed to an extension resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // Version is the version of the shoot's image.
+ // If version is not provided, it will be defaulted to the latest version from the CloudProfile.
+ // +optional
+ Version *string `json:"version,omitempty" protobuf:"bytes,3,opt,name=version"`
+}
+
+// Volume contains information about the volume type and size.
+type Volume struct {
+ // Name of the volume to make it referenceable.
+ // +optional
+ Name *string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+ // Type is the type of the volume.
+ // +optional
+ Type *string `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
+ // VolumeSize is the size of the volume.
+ VolumeSize string `json:"size" protobuf:"bytes,3,opt,name=size"`
+ // Encrypted determines if the volume should be encrypted.
+ // +optional
+ Encrypted *bool `json:"encrypted,omitempty" protobuf:"varint,4,opt,name=encrypted"`
+}
+
+// DataVolume contains information about a data volume.
+type DataVolume struct {
+ // Name of the volume to make it referenceable.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Type is the type of the volume.
+ // +optional
+ Type *string `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
+ // VolumeSize is the size of the volume.
+ VolumeSize string `json:"size" protobuf:"bytes,3,opt,name=size"`
+ // Encrypted determines if the volume should be encrypted.
+ // +optional
+ Encrypted *bool `json:"encrypted,omitempty" protobuf:"varint,4,opt,name=encrypted"`
+}
+
+// CRI contains information about the Container Runtimes.
+type CRI struct {
+ // Name is the name of the CRI library.
+ Name CRIName `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // ContainerRuntimes is the list of the required container runtimes supported for a worker pool.
+ // +optional
+ ContainerRuntimes []ContainerRuntime `json:"containerRuntimes,omitempty" protobuf:"bytes,2,rep,name=containerRuntimes"`
+}
+
+// CRIName is a type alias for the CRI name string.
+type CRIName string
+
+const (
+ // CRINameContainerD is a constant for the containerd CRI name.
+ CRINameContainerD CRIName = "containerd"
+)
+
+// ContainerRuntime contains information about a worker's available container runtime.
+type ContainerRuntime struct {
+ // Type is the type of the Container Runtime.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // ProviderConfig is the configuration passed to the ContainerRuntime resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+}
+
+var (
+ // DefaultWorkerMaxSurge is the default value for Worker MaxSurge.
+ DefaultWorkerMaxSurge = intstr.FromInt(1)
+ // DefaultWorkerMaxUnavailable is the default value for Worker MaxUnavailable.
+ DefaultWorkerMaxUnavailable = intstr.FromInt(0)
+ // DefaultWorkerSystemComponentsAllow is the default value for Worker AllowSystemComponents
+ DefaultWorkerSystemComponentsAllow = true
+)
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Other/miscellaneous constants and types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+const (
+ // ShootEventImageVersionMaintenance indicates that a maintenance operation regarding the image version has been performed.
+ ShootEventImageVersionMaintenance = "MachineImageVersionMaintenance"
+ // ShootEventK8sVersionMaintenance indicates that a maintenance operation regarding the K8s version has been performed.
+ ShootEventK8sVersionMaintenance = "KubernetesVersionMaintenance"
+ // ShootEventHibernationEnabled indicates that hibernation started.
+ ShootEventHibernationEnabled = "Hibernated"
+ // ShootEventHibernationDisabled indicates that hibernation ended.
+ ShootEventHibernationDisabled = "WokenUp"
+ // ShootEventSchedulingSuccessful indicates that a scheduling decision was taken successfully.
+ ShootEventSchedulingSuccessful = "SchedulingSuccessful"
+ // ShootEventSchedulingFailed indicates that a scheduling decision failed.
+ ShootEventSchedulingFailed = "SchedulingFailed"
+)
+
+const (
+ // ShootAPIServerAvailable is a constant for a condition type indicating that the Shoot cluster's API server is available.
+ ShootAPIServerAvailable ConditionType = "APIServerAvailable"
+ // ShootControlPlaneHealthy is a constant for a condition type indicating the control plane health.
+ ShootControlPlaneHealthy ConditionType = "ControlPlaneHealthy"
+ // ShootEveryNodeReady is a constant for a condition type indicating the node health.
+ ShootEveryNodeReady ConditionType = "EveryNodeReady"
+ // ShootSystemComponentsHealthy is a constant for a condition type indicating the system components health.
+ ShootSystemComponentsHealthy ConditionType = "SystemComponentsHealthy"
+ // ShootHibernationPossible is a constant for a condition type indicating whether the Shoot can be hibernated.
+ ShootHibernationPossible ConditionType = "HibernationPossible"
+ // ShootMaintenancePreconditionsSatisfied is a constant for a condition type indicating whether all preconditions
+ // for a shoot maintenance operation are satisfied.
+ ShootMaintenancePreconditionsSatisfied ConditionType = "MaintenancePreconditionsSatisfied"
+)
+
+// ShootPurpose is a type alias for string.
+type ShootPurpose string
+
+const (
+ // ShootPurposeEvaluation is a constant for the evaluation purpose.
+ ShootPurposeEvaluation ShootPurpose = "evaluation"
+ // ShootPurposeTesting is a constant for the testing purpose.
+ ShootPurposeTesting ShootPurpose = "testing"
+ // ShootPurposeDevelopment is a constant for the development purpose.
+ ShootPurposeDevelopment ShootPurpose = "development"
+ // ShootPurposeProduction is a constant for the production purpose.
+ ShootPurposeProduction ShootPurpose = "production"
+ // ShootPurposeInfrastructure is a constant for the infrastructure purpose.
+ ShootPurposeInfrastructure ShootPurpose = "infrastructure"
+)
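A tiny illustrative helper validating a purpose value against the constants above; it is not part of the Gardener API:

```go
// isValidShootPurpose reports whether p is one of the known purposes.
func isValidShootPurpose(p ShootPurpose) bool {
	switch p {
	case ShootPurposeEvaluation, ShootPurposeTesting, ShootPurposeDevelopment,
		ShootPurposeProduction, ShootPurposeInfrastructure:
		return true
	}
	return false
}
```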
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shootstate.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shootstate.go
new file mode 100644
index 0000000..4586065
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shootstate.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ShootState contains a snapshot of the Shoot's state required to migrate the Shoot's control plane to a new Seed.
+type ShootState struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the ShootState.
+ // +optional
+ Spec ShootStateSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ShootStateList is a list of ShootState objects.
+type ShootStateList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of ShootStates.
+ Items []ShootState `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ShootStateSpec is the specification of the ShootState.
+type ShootStateSpec struct {
+ // Gardener holds the data required to generate resources deployed by the gardenlet
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ Gardener []GardenerResourceData `json:"gardener,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=gardener"`
+ // Extensions holds the state of custom resources reconciled by extension controllers in the seed
+ // +optional
+ Extensions []ExtensionResourceState `json:"extensions,omitempty" protobuf:"bytes,2,rep,name=extensions"`
+ // Resources holds the data of resources referred to by extension controller states
+ // +optional
+ Resources []ResourceData `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
+}
+
+// GardenerResourceData holds the data which is used to generate resources, deployed in the Shoot's control plane.
+type GardenerResourceData struct {
+ // Name of the object required to generate resources
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Type of the object
+ Type string `json:"type" protobuf:"bytes,2,opt,name=type"`
+ // Data contains the payload required to generate resources
+ Data runtime.RawExtension `json:"data" protobuf:"bytes,3,opt,name=data"`
+}
+
+// ExtensionResourceState contains the kind of the extension custom resource and its last observed state in the Shoot's
+// namespace on the Seed cluster.
+type ExtensionResourceState struct {
+ // Kind (type) of the extension custom resource
+ Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+ // Name of the extension custom resource
+ // +optional
+ Name *string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
+ // Purpose of the extension custom resource
+ // +optional
+ Purpose *string `json:"purpose,omitempty" protobuf:"bytes,3,opt,name=purpose"`
+ // State of the extension resource
+ // +optional
+ State *runtime.RawExtension `json:"state,omitempty" protobuf:"bytes,4,opt,name=state"`
+ // Resources holds a list of named resource references that can be referred to in the state by their names.
+ // +optional
+ Resources []gardencorev1beta1.NamedResourceReference `json:"resources,omitempty" protobuf:"bytes,5,rep,name=resources"`
+}
+
+// ResourceData holds the data of a resource referred to by an extension controller state.
+type ResourceData struct {
+ autoscalingv1.CrossVersionObjectReference `json:",inline" protobuf:"bytes,1,opt,name=ref"`
+ // Data of the resource
+ Data runtime.RawExtension `json:"data" protobuf:"bytes,2,opt,name=data"`
+}
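For orientation, a hedged sketch of a minimal ShootState object built from the types above; the names and the JSON payload are invented for illustration:

```go
// Illustrative only: a ShootState carrying one piece of gardenlet-generated data.
state := &ShootState{
	ObjectMeta: metav1.ObjectMeta{Name: "my-shoot", Namespace: "garden-dev"},
	Spec: ShootStateSpec{
		Gardener: []GardenerResourceData{{
			Name: "ca",
			Type: "secret",
			Data: runtime.RawExtension{Raw: []byte(`{"dummy":"payload"}`)},
		}},
	},
}
_ = state
```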
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_utils.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_utils.go
new file mode 100644
index 0000000..a218d0b
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_utils.go
@@ -0,0 +1,72 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ // EventSchedulingSuccessful is an event reason for successful scheduling.
+ EventSchedulingSuccessful = "SchedulingSuccessful"
+ // EventSchedulingFailed is an event reason for failed scheduling.
+ EventSchedulingFailed = "SchedulingFailed"
+)
+
+// ConditionStatus is the status of a condition.
+type ConditionStatus string
+
+// ConditionType is a string alias.
+type ConditionType string
+
+// Condition holds the information about the state of a resource.
+type Condition struct {
+	// Type of the condition.
+ Type ConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // Last time the condition was updated.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime" protobuf:"bytes,4,opt,name=lastUpdateTime"`
+ // The reason for the condition's last transition.
+ Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ Message string `json:"message" protobuf:"bytes,6,opt,name=message"`
+ // Well-defined error codes in case the condition reports a problem.
+ // +optional
+ Codes []ErrorCode `json:"codes,omitempty" protobuf:"bytes,7,rep,name=codes,casttype=ErrorCode"`
+}
+
+const (
+ // ConditionTrue means a resource is in the condition.
+ ConditionTrue ConditionStatus = "True"
+ // ConditionFalse means a resource is not in the condition.
+ ConditionFalse ConditionStatus = "False"
+ // ConditionUnknown means Gardener can't decide if a resource is in the condition or not.
+ ConditionUnknown ConditionStatus = "Unknown"
+	// ConditionProgressing means the condition was previously True but has since failed, while still remaining within a predefined failure threshold.
+ // In the future, we could add other intermediate conditions, e.g. ConditionDegraded.
+ ConditionProgressing ConditionStatus = "Progressing"
+
+	// ConditionCheckError is a constant for a reason in a condition.
+ ConditionCheckError = "ConditionCheckError"
+ // ManagedResourceMissingConditionError is a constant for a reason in a condition that indicates
+ // one or multiple missing conditions in the observed managed resource.
+ ManagedResourceMissingConditionError = "MissingManagedResourceCondition"
+ // OutdatedStatusError is a constant for a reason in a condition that indicates
+ // that the observed generation in a status is outdated.
+ OutdatedStatusError = "OutdatedStatus"
+)
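A small illustrative lookup helper over these types (the real Gardener helper functions live in a separate utils package; this sketch only shows the intended shape of the data):

```go
// getCondition returns a pointer to the condition with the given type, or nil.
func getCondition(conditions []Condition, t ConditionType) *Condition {
	for i := range conditions {
		if conditions[i].Type == t {
			return &conditions[i]
		}
	}
	return nil
}
```

A caller could then, for example, look up `getCondition(status.Conditions, ShootAPIServerAvailable)` and compare its Status against ConditionTrue; the status object itself is an assumption here.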
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.conversion.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.conversion.go
new file mode 100644
index 0000000..6169e13
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.conversion.go
@@ -0,0 +1,5035 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ unsafe "unsafe"
+
+ core "github.com/gardener/gardener/pkg/apis/core"
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ v1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ resource "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ types "k8s.io/apimachinery/pkg/types"
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
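Once these functions are registered, conversion is normally driven through a runtime.Scheme rather than by calling the generated functions directly; a hedged sketch, with scheme setup abbreviated and errors ignored:

```go
// Illustrative conversion via a runtime.Scheme; error handling elided for brevity.
scheme := runtime.NewScheme()
_ = RegisterConversions(scheme)

in := &Addon{Enabled: true}
out := &core.Addon{}
_ = scheme.Convert(in, out, nil) // out.Enabled == true afterwards
```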
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*Addon)(nil), (*core.Addon)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Addon_To_core_Addon(a.(*Addon), b.(*core.Addon), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Addon)(nil), (*Addon)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Addon_To_v1alpha1_Addon(a.(*core.Addon), b.(*Addon), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Addons)(nil), (*core.Addons)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Addons_To_core_Addons(a.(*Addons), b.(*core.Addons), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Addons)(nil), (*Addons)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Addons_To_v1alpha1_Addons(a.(*core.Addons), b.(*Addons), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AdmissionPlugin)(nil), (*core.AdmissionPlugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_AdmissionPlugin_To_core_AdmissionPlugin(a.(*AdmissionPlugin), b.(*core.AdmissionPlugin), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.AdmissionPlugin)(nil), (*AdmissionPlugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_AdmissionPlugin_To_v1alpha1_AdmissionPlugin(a.(*core.AdmissionPlugin), b.(*AdmissionPlugin), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Alerting)(nil), (*core.Alerting)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Alerting_To_core_Alerting(a.(*Alerting), b.(*core.Alerting), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Alerting)(nil), (*Alerting)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Alerting_To_v1alpha1_Alerting(a.(*core.Alerting), b.(*Alerting), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AuditConfig)(nil), (*core.AuditConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_AuditConfig_To_core_AuditConfig(a.(*AuditConfig), b.(*core.AuditConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.AuditConfig)(nil), (*AuditConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_AuditConfig_To_v1alpha1_AuditConfig(a.(*core.AuditConfig), b.(*AuditConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AuditPolicy)(nil), (*core.AuditPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_AuditPolicy_To_core_AuditPolicy(a.(*AuditPolicy), b.(*core.AuditPolicy), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.AuditPolicy)(nil), (*AuditPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_AuditPolicy_To_v1alpha1_AuditPolicy(a.(*core.AuditPolicy), b.(*AuditPolicy), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AvailabilityZone)(nil), (*core.AvailabilityZone)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_AvailabilityZone_To_core_AvailabilityZone(a.(*AvailabilityZone), b.(*core.AvailabilityZone), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.AvailabilityZone)(nil), (*AvailabilityZone)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_AvailabilityZone_To_v1alpha1_AvailabilityZone(a.(*core.AvailabilityZone), b.(*AvailabilityZone), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupBucketList)(nil), (*core.BackupBucketList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupBucketList_To_core_BackupBucketList(a.(*BackupBucketList), b.(*core.BackupBucketList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupBucketList)(nil), (*BackupBucketList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupBucketList_To_v1alpha1_BackupBucketList(a.(*core.BackupBucketList), b.(*BackupBucketList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupBucketProvider)(nil), (*core.BackupBucketProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupBucketProvider_To_core_BackupBucketProvider(a.(*BackupBucketProvider), b.(*core.BackupBucketProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupBucketProvider)(nil), (*BackupBucketProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupBucketProvider_To_v1alpha1_BackupBucketProvider(a.(*core.BackupBucketProvider), b.(*BackupBucketProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupBucketStatus)(nil), (*core.BackupBucketStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupBucketStatus_To_core_BackupBucketStatus(a.(*BackupBucketStatus), b.(*core.BackupBucketStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupBucketStatus)(nil), (*BackupBucketStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupBucketStatus_To_v1alpha1_BackupBucketStatus(a.(*core.BackupBucketStatus), b.(*BackupBucketStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupEntryList)(nil), (*core.BackupEntryList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupEntryList_To_core_BackupEntryList(a.(*BackupEntryList), b.(*core.BackupEntryList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupEntryList)(nil), (*BackupEntryList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupEntryList_To_v1alpha1_BackupEntryList(a.(*core.BackupEntryList), b.(*BackupEntryList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupEntryStatus)(nil), (*core.BackupEntryStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupEntryStatus_To_core_BackupEntryStatus(a.(*BackupEntryStatus), b.(*core.BackupEntryStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupEntryStatus)(nil), (*BackupEntryStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupEntryStatus_To_v1alpha1_BackupEntryStatus(a.(*core.BackupEntryStatus), b.(*BackupEntryStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CRI)(nil), (*core.CRI)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_CRI_To_core_CRI(a.(*CRI), b.(*core.CRI), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.CRI)(nil), (*CRI)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_CRI_To_v1alpha1_CRI(a.(*core.CRI), b.(*CRI), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CloudInfo)(nil), (*core.CloudInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_CloudInfo_To_core_CloudInfo(a.(*CloudInfo), b.(*core.CloudInfo), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.CloudInfo)(nil), (*CloudInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_CloudInfo_To_v1alpha1_CloudInfo(a.(*core.CloudInfo), b.(*CloudInfo), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CloudProfile)(nil), (*core.CloudProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_CloudProfile_To_core_CloudProfile(a.(*CloudProfile), b.(*core.CloudProfile), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.CloudProfile)(nil), (*CloudProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_CloudProfile_To_v1alpha1_CloudProfile(a.(*core.CloudProfile), b.(*CloudProfile), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CloudProfileList)(nil), (*core.CloudProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_CloudProfileList_To_core_CloudProfileList(a.(*CloudProfileList), b.(*core.CloudProfileList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.CloudProfileList)(nil), (*CloudProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_CloudProfileList_To_v1alpha1_CloudProfileList(a.(*core.CloudProfileList), b.(*CloudProfileList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CloudProfileSpec)(nil), (*core.CloudProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_CloudProfileSpec_To_core_CloudProfileSpec(a.(*CloudProfileSpec), b.(*core.CloudProfileSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.CloudProfileSpec)(nil), (*CloudProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_CloudProfileSpec_To_v1alpha1_CloudProfileSpec(a.(*core.CloudProfileSpec), b.(*CloudProfileSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ClusterAutoscaler)(nil), (*core.ClusterAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ClusterAutoscaler_To_core_ClusterAutoscaler(a.(*ClusterAutoscaler), b.(*core.ClusterAutoscaler), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ClusterAutoscaler)(nil), (*ClusterAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ClusterAutoscaler_To_v1alpha1_ClusterAutoscaler(a.(*core.ClusterAutoscaler), b.(*ClusterAutoscaler), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ClusterInfo)(nil), (*core.ClusterInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ClusterInfo_To_core_ClusterInfo(a.(*ClusterInfo), b.(*core.ClusterInfo), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ClusterInfo)(nil), (*ClusterInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ClusterInfo_To_v1alpha1_ClusterInfo(a.(*core.ClusterInfo), b.(*ClusterInfo), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Condition)(nil), (*core.Condition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Condition_To_core_Condition(a.(*Condition), b.(*core.Condition), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Condition)(nil), (*Condition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Condition_To_v1alpha1_Condition(a.(*core.Condition), b.(*Condition), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ContainerRuntime)(nil), (*core.ContainerRuntime)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ContainerRuntime_To_core_ContainerRuntime(a.(*ContainerRuntime), b.(*core.ContainerRuntime), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ContainerRuntime)(nil), (*ContainerRuntime)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ContainerRuntime_To_v1alpha1_ContainerRuntime(a.(*core.ContainerRuntime), b.(*ContainerRuntime), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerDeployment)(nil), (*core.ControllerDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ControllerDeployment_To_core_ControllerDeployment(a.(*ControllerDeployment), b.(*core.ControllerDeployment), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerDeployment)(nil), (*ControllerDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerDeployment_To_v1alpha1_ControllerDeployment(a.(*core.ControllerDeployment), b.(*ControllerDeployment), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerInstallation)(nil), (*core.ControllerInstallation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ControllerInstallation_To_core_ControllerInstallation(a.(*ControllerInstallation), b.(*core.ControllerInstallation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerInstallation)(nil), (*ControllerInstallation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerInstallation_To_v1alpha1_ControllerInstallation(a.(*core.ControllerInstallation), b.(*ControllerInstallation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerInstallationList)(nil), (*core.ControllerInstallationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ControllerInstallationList_To_core_ControllerInstallationList(a.(*ControllerInstallationList), b.(*core.ControllerInstallationList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerInstallationList)(nil), (*ControllerInstallationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerInstallationList_To_v1alpha1_ControllerInstallationList(a.(*core.ControllerInstallationList), b.(*ControllerInstallationList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerInstallationSpec)(nil), (*core.ControllerInstallationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(a.(*ControllerInstallationSpec), b.(*core.ControllerInstallationSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerInstallationSpec)(nil), (*ControllerInstallationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerInstallationSpec_To_v1alpha1_ControllerInstallationSpec(a.(*core.ControllerInstallationSpec), b.(*ControllerInstallationSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerInstallationStatus)(nil), (*core.ControllerInstallationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(a.(*ControllerInstallationStatus), b.(*core.ControllerInstallationStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerInstallationStatus)(nil), (*ControllerInstallationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerInstallationStatus_To_v1alpha1_ControllerInstallationStatus(a.(*core.ControllerInstallationStatus), b.(*ControllerInstallationStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerRegistration)(nil), (*core.ControllerRegistration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ControllerRegistration_To_core_ControllerRegistration(a.(*ControllerRegistration), b.(*core.ControllerRegistration), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerRegistration)(nil), (*ControllerRegistration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerRegistration_To_v1alpha1_ControllerRegistration(a.(*core.ControllerRegistration), b.(*ControllerRegistration), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerRegistrationList)(nil), (*core.ControllerRegistrationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ControllerRegistrationList_To_core_ControllerRegistrationList(a.(*ControllerRegistrationList), b.(*core.ControllerRegistrationList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerRegistrationList)(nil), (*ControllerRegistrationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerRegistrationList_To_v1alpha1_ControllerRegistrationList(a.(*core.ControllerRegistrationList), b.(*ControllerRegistrationList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerRegistrationSpec)(nil), (*core.ControllerRegistrationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(a.(*ControllerRegistrationSpec), b.(*core.ControllerRegistrationSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerRegistrationSpec)(nil), (*ControllerRegistrationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerRegistrationSpec_To_v1alpha1_ControllerRegistrationSpec(a.(*core.ControllerRegistrationSpec), b.(*ControllerRegistrationSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerResource)(nil), (*core.ControllerResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ControllerResource_To_core_ControllerResource(a.(*ControllerResource), b.(*core.ControllerResource), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerResource)(nil), (*ControllerResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerResource_To_v1alpha1_ControllerResource(a.(*core.ControllerResource), b.(*ControllerResource), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*DNS)(nil), (*core.DNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_DNS_To_core_DNS(a.(*DNS), b.(*core.DNS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.DNS)(nil), (*DNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_DNS_To_v1alpha1_DNS(a.(*core.DNS), b.(*DNS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*DNSIncludeExclude)(nil), (*core.DNSIncludeExclude)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_DNSIncludeExclude_To_core_DNSIncludeExclude(a.(*DNSIncludeExclude), b.(*core.DNSIncludeExclude), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.DNSIncludeExclude)(nil), (*DNSIncludeExclude)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_DNSIncludeExclude_To_v1alpha1_DNSIncludeExclude(a.(*core.DNSIncludeExclude), b.(*DNSIncludeExclude), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*DNSProvider)(nil), (*core.DNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_DNSProvider_To_core_DNSProvider(a.(*DNSProvider), b.(*core.DNSProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.DNSProvider)(nil), (*DNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_DNSProvider_To_v1alpha1_DNSProvider(a.(*core.DNSProvider), b.(*DNSProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*DataVolume)(nil), (*core.DataVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_DataVolume_To_core_DataVolume(a.(*DataVolume), b.(*core.DataVolume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.DataVolume)(nil), (*DataVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_DataVolume_To_v1alpha1_DataVolume(a.(*core.DataVolume), b.(*DataVolume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Endpoint)(nil), (*core.Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Endpoint_To_core_Endpoint(a.(*Endpoint), b.(*core.Endpoint), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Endpoint)(nil), (*Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Endpoint_To_v1alpha1_Endpoint(a.(*core.Endpoint), b.(*Endpoint), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ExpirableVersion)(nil), (*core.ExpirableVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ExpirableVersion_To_core_ExpirableVersion(a.(*ExpirableVersion), b.(*core.ExpirableVersion), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ExpirableVersion)(nil), (*ExpirableVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ExpirableVersion_To_v1alpha1_ExpirableVersion(a.(*core.ExpirableVersion), b.(*ExpirableVersion), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Extension)(nil), (*core.Extension)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Extension_To_core_Extension(a.(*Extension), b.(*core.Extension), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Extension)(nil), (*Extension)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Extension_To_v1alpha1_Extension(a.(*core.Extension), b.(*Extension), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ExtensionResourceState)(nil), (*core.ExtensionResourceState)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ExtensionResourceState_To_core_ExtensionResourceState(a.(*ExtensionResourceState), b.(*core.ExtensionResourceState), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ExtensionResourceState)(nil), (*ExtensionResourceState)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ExtensionResourceState_To_v1alpha1_ExtensionResourceState(a.(*core.ExtensionResourceState), b.(*ExtensionResourceState), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Gardener)(nil), (*core.Gardener)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Gardener_To_core_Gardener(a.(*Gardener), b.(*core.Gardener), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Gardener)(nil), (*Gardener)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Gardener_To_v1alpha1_Gardener(a.(*core.Gardener), b.(*Gardener), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*GardenerResourceData)(nil), (*core.GardenerResourceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_GardenerResourceData_To_core_GardenerResourceData(a.(*GardenerResourceData), b.(*core.GardenerResourceData), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.GardenerResourceData)(nil), (*GardenerResourceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_GardenerResourceData_To_v1alpha1_GardenerResourceData(a.(*core.GardenerResourceData), b.(*GardenerResourceData), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Hibernation)(nil), (*core.Hibernation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Hibernation_To_core_Hibernation(a.(*Hibernation), b.(*core.Hibernation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Hibernation)(nil), (*Hibernation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Hibernation_To_v1alpha1_Hibernation(a.(*core.Hibernation), b.(*Hibernation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*HibernationSchedule)(nil), (*core.HibernationSchedule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_HibernationSchedule_To_core_HibernationSchedule(a.(*HibernationSchedule), b.(*core.HibernationSchedule), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.HibernationSchedule)(nil), (*HibernationSchedule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_HibernationSchedule_To_v1alpha1_HibernationSchedule(a.(*core.HibernationSchedule), b.(*HibernationSchedule), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*HorizontalPodAutoscalerConfig)(nil), (*core.HorizontalPodAutoscalerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(a.(*HorizontalPodAutoscalerConfig), b.(*core.HorizontalPodAutoscalerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.HorizontalPodAutoscalerConfig)(nil), (*HorizontalPodAutoscalerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_HorizontalPodAutoscalerConfig_To_v1alpha1_HorizontalPodAutoscalerConfig(a.(*core.HorizontalPodAutoscalerConfig), b.(*HorizontalPodAutoscalerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Ingress)(nil), (*core.Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Ingress_To_core_Ingress(a.(*Ingress), b.(*core.Ingress), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Ingress)(nil), (*Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Ingress_To_v1alpha1_Ingress(a.(*core.Ingress), b.(*Ingress), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*IngressController)(nil), (*core.IngressController)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_IngressController_To_core_IngressController(a.(*IngressController), b.(*core.IngressController), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.IngressController)(nil), (*IngressController)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_IngressController_To_v1alpha1_IngressController(a.(*core.IngressController), b.(*IngressController), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeAPIServerConfig)(nil), (*core.KubeAPIServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(a.(*KubeAPIServerConfig), b.(*core.KubeAPIServerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeAPIServerConfig)(nil), (*KubeAPIServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig(a.(*core.KubeAPIServerConfig), b.(*KubeAPIServerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeAPIServerRequests)(nil), (*core.KubeAPIServerRequests)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(a.(*KubeAPIServerRequests), b.(*core.KubeAPIServerRequests), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeAPIServerRequests)(nil), (*KubeAPIServerRequests)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeAPIServerRequests_To_v1alpha1_KubeAPIServerRequests(a.(*core.KubeAPIServerRequests), b.(*KubeAPIServerRequests), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeControllerManagerConfig)(nil), (*core.KubeControllerManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(a.(*KubeControllerManagerConfig), b.(*core.KubeControllerManagerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeControllerManagerConfig)(nil), (*KubeControllerManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeControllerManagerConfig_To_v1alpha1_KubeControllerManagerConfig(a.(*core.KubeControllerManagerConfig), b.(*KubeControllerManagerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeProxyConfig)(nil), (*core.KubeProxyConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubeProxyConfig_To_core_KubeProxyConfig(a.(*KubeProxyConfig), b.(*core.KubeProxyConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeProxyConfig)(nil), (*KubeProxyConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeProxyConfig_To_v1alpha1_KubeProxyConfig(a.(*core.KubeProxyConfig), b.(*KubeProxyConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeSchedulerConfig)(nil), (*core.KubeSchedulerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(a.(*KubeSchedulerConfig), b.(*core.KubeSchedulerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeSchedulerConfig)(nil), (*KubeSchedulerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(a.(*core.KubeSchedulerConfig), b.(*KubeSchedulerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeletConfig)(nil), (*core.KubeletConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubeletConfig_To_core_KubeletConfig(a.(*KubeletConfig), b.(*core.KubeletConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeletConfig)(nil), (*KubeletConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeletConfig_To_v1alpha1_KubeletConfig(a.(*core.KubeletConfig), b.(*KubeletConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeletConfigEviction)(nil), (*core.KubeletConfigEviction)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubeletConfigEviction_To_core_KubeletConfigEviction(a.(*KubeletConfigEviction), b.(*core.KubeletConfigEviction), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeletConfigEviction)(nil), (*KubeletConfigEviction)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeletConfigEviction_To_v1alpha1_KubeletConfigEviction(a.(*core.KubeletConfigEviction), b.(*KubeletConfigEviction), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeletConfigEvictionMinimumReclaim)(nil), (*core.KubeletConfigEvictionMinimumReclaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(a.(*KubeletConfigEvictionMinimumReclaim), b.(*core.KubeletConfigEvictionMinimumReclaim), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeletConfigEvictionMinimumReclaim)(nil), (*KubeletConfigEvictionMinimumReclaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeletConfigEvictionMinimumReclaim_To_v1alpha1_KubeletConfigEvictionMinimumReclaim(a.(*core.KubeletConfigEvictionMinimumReclaim), b.(*KubeletConfigEvictionMinimumReclaim), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeletConfigEvictionSoftGracePeriod)(nil), (*core.KubeletConfigEvictionSoftGracePeriod)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(a.(*KubeletConfigEvictionSoftGracePeriod), b.(*core.KubeletConfigEvictionSoftGracePeriod), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeletConfigEvictionSoftGracePeriod)(nil), (*KubeletConfigEvictionSoftGracePeriod)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1alpha1_KubeletConfigEvictionSoftGracePeriod(a.(*core.KubeletConfigEvictionSoftGracePeriod), b.(*KubeletConfigEvictionSoftGracePeriod), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeletConfigReserved)(nil), (*core.KubeletConfigReserved)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubeletConfigReserved_To_core_KubeletConfigReserved(a.(*KubeletConfigReserved), b.(*core.KubeletConfigReserved), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeletConfigReserved)(nil), (*KubeletConfigReserved)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeletConfigReserved_To_v1alpha1_KubeletConfigReserved(a.(*core.KubeletConfigReserved), b.(*KubeletConfigReserved), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Kubernetes)(nil), (*core.Kubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Kubernetes_To_core_Kubernetes(a.(*Kubernetes), b.(*core.Kubernetes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Kubernetes)(nil), (*Kubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Kubernetes_To_v1alpha1_Kubernetes(a.(*core.Kubernetes), b.(*Kubernetes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubernetesConfig)(nil), (*core.KubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(a.(*KubernetesConfig), b.(*core.KubernetesConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubernetesConfig)(nil), (*KubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(a.(*core.KubernetesConfig), b.(*KubernetesConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubernetesDashboard)(nil), (*core.KubernetesDashboard)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubernetesDashboard_To_core_KubernetesDashboard(a.(*KubernetesDashboard), b.(*core.KubernetesDashboard), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubernetesDashboard)(nil), (*KubernetesDashboard)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubernetesDashboard_To_v1alpha1_KubernetesDashboard(a.(*core.KubernetesDashboard), b.(*KubernetesDashboard), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubernetesInfo)(nil), (*core.KubernetesInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubernetesInfo_To_core_KubernetesInfo(a.(*KubernetesInfo), b.(*core.KubernetesInfo), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubernetesInfo)(nil), (*KubernetesInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubernetesInfo_To_v1alpha1_KubernetesInfo(a.(*core.KubernetesInfo), b.(*KubernetesInfo), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubernetesSettings)(nil), (*core.KubernetesSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_KubernetesSettings_To_core_KubernetesSettings(a.(*KubernetesSettings), b.(*core.KubernetesSettings), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubernetesSettings)(nil), (*KubernetesSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubernetesSettings_To_v1alpha1_KubernetesSettings(a.(*core.KubernetesSettings), b.(*KubernetesSettings), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*LastError)(nil), (*core.LastError)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_LastError_To_core_LastError(a.(*LastError), b.(*core.LastError), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.LastError)(nil), (*LastError)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_LastError_To_v1alpha1_LastError(a.(*core.LastError), b.(*LastError), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*LastOperation)(nil), (*core.LastOperation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_LastOperation_To_core_LastOperation(a.(*LastOperation), b.(*core.LastOperation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.LastOperation)(nil), (*LastOperation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_LastOperation_To_v1alpha1_LastOperation(a.(*core.LastOperation), b.(*LastOperation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Machine)(nil), (*core.Machine)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Machine_To_core_Machine(a.(*Machine), b.(*core.Machine), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Machine)(nil), (*Machine)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Machine_To_v1alpha1_Machine(a.(*core.Machine), b.(*Machine), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MachineControllerManagerSettings)(nil), (*core.MachineControllerManagerSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(a.(*MachineControllerManagerSettings), b.(*core.MachineControllerManagerSettings), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MachineControllerManagerSettings)(nil), (*MachineControllerManagerSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MachineControllerManagerSettings_To_v1alpha1_MachineControllerManagerSettings(a.(*core.MachineControllerManagerSettings), b.(*MachineControllerManagerSettings), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MachineImage)(nil), (*core.MachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_MachineImage_To_core_MachineImage(a.(*MachineImage), b.(*core.MachineImage), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MachineImage)(nil), (*MachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MachineImage_To_v1alpha1_MachineImage(a.(*core.MachineImage), b.(*MachineImage), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MachineImageVersion)(nil), (*core.MachineImageVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_MachineImageVersion_To_core_MachineImageVersion(a.(*MachineImageVersion), b.(*core.MachineImageVersion), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MachineImageVersion)(nil), (*MachineImageVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MachineImageVersion_To_v1alpha1_MachineImageVersion(a.(*core.MachineImageVersion), b.(*MachineImageVersion), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MachineType)(nil), (*core.MachineType)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_MachineType_To_core_MachineType(a.(*MachineType), b.(*core.MachineType), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MachineType)(nil), (*MachineType)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MachineType_To_v1alpha1_MachineType(a.(*core.MachineType), b.(*MachineType), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MachineTypeStorage)(nil), (*core.MachineTypeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_MachineTypeStorage_To_core_MachineTypeStorage(a.(*MachineTypeStorage), b.(*core.MachineTypeStorage), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MachineTypeStorage)(nil), (*MachineTypeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MachineTypeStorage_To_v1alpha1_MachineTypeStorage(a.(*core.MachineTypeStorage), b.(*MachineTypeStorage), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Maintenance)(nil), (*core.Maintenance)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Maintenance_To_core_Maintenance(a.(*Maintenance), b.(*core.Maintenance), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Maintenance)(nil), (*Maintenance)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Maintenance_To_v1alpha1_Maintenance(a.(*core.Maintenance), b.(*Maintenance), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MaintenanceAutoUpdate)(nil), (*core.MaintenanceAutoUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(a.(*MaintenanceAutoUpdate), b.(*core.MaintenanceAutoUpdate), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MaintenanceAutoUpdate)(nil), (*MaintenanceAutoUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MaintenanceAutoUpdate_To_v1alpha1_MaintenanceAutoUpdate(a.(*core.MaintenanceAutoUpdate), b.(*MaintenanceAutoUpdate), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MaintenanceTimeWindow)(nil), (*core.MaintenanceTimeWindow)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(a.(*MaintenanceTimeWindow), b.(*core.MaintenanceTimeWindow), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MaintenanceTimeWindow)(nil), (*MaintenanceTimeWindow)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MaintenanceTimeWindow_To_v1alpha1_MaintenanceTimeWindow(a.(*core.MaintenanceTimeWindow), b.(*MaintenanceTimeWindow), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Monitoring)(nil), (*core.Monitoring)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Monitoring_To_core_Monitoring(a.(*Monitoring), b.(*core.Monitoring), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Monitoring)(nil), (*Monitoring)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Monitoring_To_v1alpha1_Monitoring(a.(*core.Monitoring), b.(*Monitoring), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*NamedResourceReference)(nil), (*core.NamedResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_NamedResourceReference_To_core_NamedResourceReference(a.(*NamedResourceReference), b.(*core.NamedResourceReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.NamedResourceReference)(nil), (*NamedResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_NamedResourceReference_To_v1alpha1_NamedResourceReference(a.(*core.NamedResourceReference), b.(*NamedResourceReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Networking)(nil), (*core.Networking)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Networking_To_core_Networking(a.(*Networking), b.(*core.Networking), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Networking)(nil), (*Networking)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Networking_To_v1alpha1_Networking(a.(*core.Networking), b.(*Networking), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*NginxIngress)(nil), (*core.NginxIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_NginxIngress_To_core_NginxIngress(a.(*NginxIngress), b.(*core.NginxIngress), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.NginxIngress)(nil), (*NginxIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_NginxIngress_To_v1alpha1_NginxIngress(a.(*core.NginxIngress), b.(*NginxIngress), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*OIDCConfig)(nil), (*core.OIDCConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_OIDCConfig_To_core_OIDCConfig(a.(*OIDCConfig), b.(*core.OIDCConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.OIDCConfig)(nil), (*OIDCConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_OIDCConfig_To_v1alpha1_OIDCConfig(a.(*core.OIDCConfig), b.(*OIDCConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*OpenIDConnectClientAuthentication)(nil), (*core.OpenIDConnectClientAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(a.(*OpenIDConnectClientAuthentication), b.(*core.OpenIDConnectClientAuthentication), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.OpenIDConnectClientAuthentication)(nil), (*OpenIDConnectClientAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication(a.(*core.OpenIDConnectClientAuthentication), b.(*OpenIDConnectClientAuthentication), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Plant)(nil), (*core.Plant)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Plant_To_core_Plant(a.(*Plant), b.(*core.Plant), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Plant)(nil), (*Plant)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Plant_To_v1alpha1_Plant(a.(*core.Plant), b.(*Plant), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*PlantList)(nil), (*core.PlantList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_PlantList_To_core_PlantList(a.(*PlantList), b.(*core.PlantList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.PlantList)(nil), (*PlantList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_PlantList_To_v1alpha1_PlantList(a.(*core.PlantList), b.(*PlantList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*PlantSpec)(nil), (*core.PlantSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_PlantSpec_To_core_PlantSpec(a.(*PlantSpec), b.(*core.PlantSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.PlantSpec)(nil), (*PlantSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_PlantSpec_To_v1alpha1_PlantSpec(a.(*core.PlantSpec), b.(*PlantSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*PlantStatus)(nil), (*core.PlantStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_PlantStatus_To_core_PlantStatus(a.(*PlantStatus), b.(*core.PlantStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.PlantStatus)(nil), (*PlantStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_PlantStatus_To_v1alpha1_PlantStatus(a.(*core.PlantStatus), b.(*PlantStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Project)(nil), (*core.Project)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Project_To_core_Project(a.(*Project), b.(*core.Project), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Project)(nil), (*Project)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Project_To_v1alpha1_Project(a.(*core.Project), b.(*Project), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ProjectList)(nil), (*core.ProjectList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ProjectList_To_core_ProjectList(a.(*ProjectList), b.(*core.ProjectList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ProjectList)(nil), (*ProjectList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ProjectList_To_v1alpha1_ProjectList(a.(*core.ProjectList), b.(*ProjectList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ProjectStatus)(nil), (*core.ProjectStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ProjectStatus_To_core_ProjectStatus(a.(*ProjectStatus), b.(*core.ProjectStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ProjectStatus)(nil), (*ProjectStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ProjectStatus_To_v1alpha1_ProjectStatus(a.(*core.ProjectStatus), b.(*ProjectStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ProjectTolerations)(nil), (*core.ProjectTolerations)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ProjectTolerations_To_core_ProjectTolerations(a.(*ProjectTolerations), b.(*core.ProjectTolerations), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ProjectTolerations)(nil), (*ProjectTolerations)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ProjectTolerations_To_v1alpha1_ProjectTolerations(a.(*core.ProjectTolerations), b.(*ProjectTolerations), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Provider)(nil), (*core.Provider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Provider_To_core_Provider(a.(*Provider), b.(*core.Provider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Provider)(nil), (*Provider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Provider_To_v1alpha1_Provider(a.(*core.Provider), b.(*Provider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Quota)(nil), (*core.Quota)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Quota_To_core_Quota(a.(*Quota), b.(*core.Quota), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Quota)(nil), (*Quota)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Quota_To_v1alpha1_Quota(a.(*core.Quota), b.(*Quota), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*QuotaList)(nil), (*core.QuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_QuotaList_To_core_QuotaList(a.(*QuotaList), b.(*core.QuotaList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.QuotaList)(nil), (*QuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_QuotaList_To_v1alpha1_QuotaList(a.(*core.QuotaList), b.(*QuotaList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*QuotaSpec)(nil), (*core.QuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_QuotaSpec_To_core_QuotaSpec(a.(*QuotaSpec), b.(*core.QuotaSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.QuotaSpec)(nil), (*QuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_QuotaSpec_To_v1alpha1_QuotaSpec(a.(*core.QuotaSpec), b.(*QuotaSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Region)(nil), (*core.Region)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Region_To_core_Region(a.(*Region), b.(*core.Region), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Region)(nil), (*Region)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Region_To_v1alpha1_Region(a.(*core.Region), b.(*Region), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ResourceData)(nil), (*core.ResourceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ResourceData_To_core_ResourceData(a.(*ResourceData), b.(*core.ResourceData), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ResourceData)(nil), (*ResourceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ResourceData_To_v1alpha1_ResourceData(a.(*core.ResourceData), b.(*ResourceData), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ResourceWatchCacheSize)(nil), (*core.ResourceWatchCacheSize)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(a.(*ResourceWatchCacheSize), b.(*core.ResourceWatchCacheSize), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ResourceWatchCacheSize)(nil), (*ResourceWatchCacheSize)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ResourceWatchCacheSize_To_v1alpha1_ResourceWatchCacheSize(a.(*core.ResourceWatchCacheSize), b.(*ResourceWatchCacheSize), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SecretBinding)(nil), (*core.SecretBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SecretBinding_To_core_SecretBinding(a.(*SecretBinding), b.(*core.SecretBinding), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SecretBinding)(nil), (*SecretBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SecretBinding_To_v1alpha1_SecretBinding(a.(*core.SecretBinding), b.(*SecretBinding), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SecretBindingList)(nil), (*core.SecretBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SecretBindingList_To_core_SecretBindingList(a.(*SecretBindingList), b.(*core.SecretBindingList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SecretBindingList)(nil), (*SecretBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SecretBindingList_To_v1alpha1_SecretBindingList(a.(*core.SecretBindingList), b.(*SecretBindingList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedBackup)(nil), (*core.SeedBackup)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedBackup_To_core_SeedBackup(a.(*SeedBackup), b.(*core.SeedBackup), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedBackup)(nil), (*SeedBackup)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedBackup_To_v1alpha1_SeedBackup(a.(*core.SeedBackup), b.(*SeedBackup), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedDNS)(nil), (*core.SeedDNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedDNS_To_core_SeedDNS(a.(*SeedDNS), b.(*core.SeedDNS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedDNS)(nil), (*SeedDNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedDNS_To_v1alpha1_SeedDNS(a.(*core.SeedDNS), b.(*SeedDNS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedDNSProvider)(nil), (*core.SeedDNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedDNSProvider_To_core_SeedDNSProvider(a.(*SeedDNSProvider), b.(*core.SeedDNSProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedDNSProvider)(nil), (*SeedDNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedDNSProvider_To_v1alpha1_SeedDNSProvider(a.(*core.SeedDNSProvider), b.(*SeedDNSProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedList)(nil), (*core.SeedList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedList_To_core_SeedList(a.(*SeedList), b.(*core.SeedList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedList)(nil), (*SeedList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedList_To_v1alpha1_SeedList(a.(*core.SeedList), b.(*SeedList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedProvider)(nil), (*core.SeedProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedProvider_To_core_SeedProvider(a.(*SeedProvider), b.(*core.SeedProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedProvider)(nil), (*SeedProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedProvider_To_v1alpha1_SeedProvider(a.(*core.SeedProvider), b.(*SeedProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSelector)(nil), (*core.SeedSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedSelector_To_core_SeedSelector(a.(*SeedSelector), b.(*core.SeedSelector), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSelector)(nil), (*SeedSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSelector_To_v1alpha1_SeedSelector(a.(*core.SeedSelector), b.(*SeedSelector), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSettingExcessCapacityReservation)(nil), (*core.SeedSettingExcessCapacityReservation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(a.(*SeedSettingExcessCapacityReservation), b.(*core.SeedSettingExcessCapacityReservation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSettingExcessCapacityReservation)(nil), (*SeedSettingExcessCapacityReservation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSettingExcessCapacityReservation_To_v1alpha1_SeedSettingExcessCapacityReservation(a.(*core.SeedSettingExcessCapacityReservation), b.(*SeedSettingExcessCapacityReservation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSettingLoadBalancerServices)(nil), (*core.SeedSettingLoadBalancerServices)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(a.(*SeedSettingLoadBalancerServices), b.(*core.SeedSettingLoadBalancerServices), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSettingLoadBalancerServices)(nil), (*SeedSettingLoadBalancerServices)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSettingLoadBalancerServices_To_v1alpha1_SeedSettingLoadBalancerServices(a.(*core.SeedSettingLoadBalancerServices), b.(*SeedSettingLoadBalancerServices), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSettingScheduling)(nil), (*core.SeedSettingScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedSettingScheduling_To_core_SeedSettingScheduling(a.(*SeedSettingScheduling), b.(*core.SeedSettingScheduling), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSettingScheduling)(nil), (*SeedSettingScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSettingScheduling_To_v1alpha1_SeedSettingScheduling(a.(*core.SeedSettingScheduling), b.(*SeedSettingScheduling), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSettingShootDNS)(nil), (*core.SeedSettingShootDNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(a.(*SeedSettingShootDNS), b.(*core.SeedSettingShootDNS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSettingShootDNS)(nil), (*SeedSettingShootDNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSettingShootDNS_To_v1alpha1_SeedSettingShootDNS(a.(*core.SeedSettingShootDNS), b.(*SeedSettingShootDNS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSettingVerticalPodAutoscaler)(nil), (*core.SeedSettingVerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(a.(*SeedSettingVerticalPodAutoscaler), b.(*core.SeedSettingVerticalPodAutoscaler), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSettingVerticalPodAutoscaler)(nil), (*SeedSettingVerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSettingVerticalPodAutoscaler_To_v1alpha1_SeedSettingVerticalPodAutoscaler(a.(*core.SeedSettingVerticalPodAutoscaler), b.(*SeedSettingVerticalPodAutoscaler), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSettings)(nil), (*core.SeedSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedSettings_To_core_SeedSettings(a.(*SeedSettings), b.(*core.SeedSettings), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSettings)(nil), (*SeedSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSettings_To_v1alpha1_SeedSettings(a.(*core.SeedSettings), b.(*SeedSettings), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedStatus)(nil), (*core.SeedStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedStatus_To_core_SeedStatus(a.(*SeedStatus), b.(*core.SeedStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedTaint)(nil), (*core.SeedTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedTaint_To_core_SeedTaint(a.(*SeedTaint), b.(*core.SeedTaint), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedTaint)(nil), (*SeedTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedTaint_To_v1alpha1_SeedTaint(a.(*core.SeedTaint), b.(*SeedTaint), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedVolume)(nil), (*core.SeedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedVolume_To_core_SeedVolume(a.(*SeedVolume), b.(*core.SeedVolume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedVolume)(nil), (*SeedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedVolume_To_v1alpha1_SeedVolume(a.(*core.SeedVolume), b.(*SeedVolume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedVolumeProvider)(nil), (*core.SeedVolumeProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedVolumeProvider_To_core_SeedVolumeProvider(a.(*SeedVolumeProvider), b.(*core.SeedVolumeProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedVolumeProvider)(nil), (*SeedVolumeProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedVolumeProvider_To_v1alpha1_SeedVolumeProvider(a.(*core.SeedVolumeProvider), b.(*SeedVolumeProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ServiceAccountConfig)(nil), (*core.ServiceAccountConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ServiceAccountConfig_To_core_ServiceAccountConfig(a.(*ServiceAccountConfig), b.(*core.ServiceAccountConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ServiceAccountConfig)(nil), (*ServiceAccountConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ServiceAccountConfig_To_v1alpha1_ServiceAccountConfig(a.(*core.ServiceAccountConfig), b.(*ServiceAccountConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Shoot)(nil), (*core.Shoot)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Shoot_To_core_Shoot(a.(*Shoot), b.(*core.Shoot), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Shoot)(nil), (*Shoot)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Shoot_To_v1alpha1_Shoot(a.(*core.Shoot), b.(*Shoot), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ShootList)(nil), (*core.ShootList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ShootList_To_core_ShootList(a.(*ShootList), b.(*core.ShootList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ShootList)(nil), (*ShootList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootList_To_v1alpha1_ShootList(a.(*core.ShootList), b.(*ShootList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ShootMachineImage)(nil), (*core.ShootMachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ShootMachineImage_To_core_ShootMachineImage(a.(*ShootMachineImage), b.(*core.ShootMachineImage), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ShootMachineImage)(nil), (*ShootMachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootMachineImage_To_v1alpha1_ShootMachineImage(a.(*core.ShootMachineImage), b.(*ShootMachineImage), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ShootNetworks)(nil), (*core.ShootNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ShootNetworks_To_core_ShootNetworks(a.(*ShootNetworks), b.(*core.ShootNetworks), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ShootNetworks)(nil), (*ShootNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootNetworks_To_v1alpha1_ShootNetworks(a.(*core.ShootNetworks), b.(*ShootNetworks), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ShootSpec)(nil), (*core.ShootSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ShootSpec_To_core_ShootSpec(a.(*ShootSpec), b.(*core.ShootSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ShootSpec)(nil), (*ShootSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootSpec_To_v1alpha1_ShootSpec(a.(*core.ShootSpec), b.(*ShootSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ShootState)(nil), (*core.ShootState)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ShootState_To_core_ShootState(a.(*ShootState), b.(*core.ShootState), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ShootState)(nil), (*ShootState)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootState_To_v1alpha1_ShootState(a.(*core.ShootState), b.(*ShootState), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ShootStateList)(nil), (*core.ShootStateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ShootStateList_To_core_ShootStateList(a.(*ShootStateList), b.(*core.ShootStateList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ShootStateList)(nil), (*ShootStateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootStateList_To_v1alpha1_ShootStateList(a.(*core.ShootStateList), b.(*ShootStateList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ShootStateSpec)(nil), (*core.ShootStateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ShootStateSpec_To_core_ShootStateSpec(a.(*ShootStateSpec), b.(*core.ShootStateSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ShootStateSpec)(nil), (*ShootStateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootStateSpec_To_v1alpha1_ShootStateSpec(a.(*core.ShootStateSpec), b.(*ShootStateSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Toleration)(nil), (*core.Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Toleration_To_core_Toleration(a.(*Toleration), b.(*core.Toleration), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Toleration)(nil), (*Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Toleration_To_v1alpha1_Toleration(a.(*core.Toleration), b.(*Toleration), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*VerticalPodAutoscaler)(nil), (*core.VerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(a.(*VerticalPodAutoscaler), b.(*core.VerticalPodAutoscaler), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.VerticalPodAutoscaler)(nil), (*VerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_VerticalPodAutoscaler_To_v1alpha1_VerticalPodAutoscaler(a.(*core.VerticalPodAutoscaler), b.(*VerticalPodAutoscaler), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Volume_To_core_Volume(a.(*Volume), b.(*core.Volume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Volume)(nil), (*Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Volume_To_v1alpha1_Volume(a.(*core.Volume), b.(*Volume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*VolumeType)(nil), (*core.VolumeType)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_VolumeType_To_core_VolumeType(a.(*VolumeType), b.(*core.VolumeType), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.VolumeType)(nil), (*VolumeType)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_VolumeType_To_v1alpha1_VolumeType(a.(*core.VolumeType), b.(*VolumeType), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*WatchCacheSizes)(nil), (*core.WatchCacheSizes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_WatchCacheSizes_To_core_WatchCacheSizes(a.(*WatchCacheSizes), b.(*core.WatchCacheSizes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.WatchCacheSizes)(nil), (*WatchCacheSizes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_WatchCacheSizes_To_v1alpha1_WatchCacheSizes(a.(*core.WatchCacheSizes), b.(*WatchCacheSizes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Worker)(nil), (*core.Worker)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Worker_To_core_Worker(a.(*Worker), b.(*core.Worker), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Worker)(nil), (*Worker)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Worker_To_v1alpha1_Worker(a.(*core.Worker), b.(*Worker), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*WorkerKubernetes)(nil), (*core.WorkerKubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_WorkerKubernetes_To_core_WorkerKubernetes(a.(*WorkerKubernetes), b.(*core.WorkerKubernetes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.WorkerKubernetes)(nil), (*WorkerKubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_WorkerKubernetes_To_v1alpha1_WorkerKubernetes(a.(*core.WorkerKubernetes), b.(*WorkerKubernetes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*WorkerSystemComponents)(nil), (*core.WorkerSystemComponents)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_WorkerSystemComponents_To_core_WorkerSystemComponents(a.(*WorkerSystemComponents), b.(*core.WorkerSystemComponents), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.WorkerSystemComponents)(nil), (*WorkerSystemComponents)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_WorkerSystemComponents_To_v1alpha1_WorkerSystemComponents(a.(*core.WorkerSystemComponents), b.(*WorkerSystemComponents), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.BackupBucketSpec)(nil), (*BackupBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupBucketSpec_To_v1alpha1_BackupBucketSpec(a.(*core.BackupBucketSpec), b.(*BackupBucketSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.BackupBucket)(nil), (*BackupBucket)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupBucket_To_v1alpha1_BackupBucket(a.(*core.BackupBucket), b.(*BackupBucket), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.BackupEntrySpec)(nil), (*BackupEntrySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupEntrySpec_To_v1alpha1_BackupEntrySpec(a.(*core.BackupEntrySpec), b.(*BackupEntrySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.BackupEntry)(nil), (*BackupEntry)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupEntry_To_v1alpha1_BackupEntry(a.(*core.BackupEntry), b.(*BackupEntry), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.ProjectMember)(nil), (*ProjectMember)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ProjectMember_To_v1alpha1_ProjectMember(a.(*core.ProjectMember), b.(*ProjectMember), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.ProjectSpec)(nil), (*ProjectSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ProjectSpec_To_v1alpha1_ProjectSpec(a.(*core.ProjectSpec), b.(*ProjectSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.SeedNetworks)(nil), (*SeedNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedNetworks_To_v1alpha1_SeedNetworks(a.(*core.SeedNetworks), b.(*SeedNetworks), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.SeedSpec)(nil), (*SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSpec_To_v1alpha1_SeedSpec(a.(*core.SeedSpec), b.(*SeedSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.SeedStatus)(nil), (*SeedStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedStatus_To_v1alpha1_SeedStatus(a.(*core.SeedStatus), b.(*SeedStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.Seed)(nil), (*Seed)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Seed_To_v1alpha1_Seed(a.(*core.Seed), b.(*Seed), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.ShootStatus)(nil), (*ShootStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootStatus_To_v1alpha1_ShootStatus(a.(*core.ShootStatus), b.(*ShootStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*BackupBucketSpec)(nil), (*core.BackupBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(a.(*BackupBucketSpec), b.(*core.BackupBucketSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*BackupBucket)(nil), (*core.BackupBucket)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupBucket_To_core_BackupBucket(a.(*BackupBucket), b.(*core.BackupBucket), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*BackupEntrySpec)(nil), (*core.BackupEntrySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupEntrySpec_To_core_BackupEntrySpec(a.(*BackupEntrySpec), b.(*core.BackupEntrySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*BackupEntry)(nil), (*core.BackupEntry)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_BackupEntry_To_core_BackupEntry(a.(*BackupEntry), b.(*core.BackupEntry), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*ProjectMember)(nil), (*core.ProjectMember)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ProjectMember_To_core_ProjectMember(a.(*ProjectMember), b.(*core.ProjectMember), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*ProjectSpec)(nil), (*core.ProjectSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ProjectSpec_To_core_ProjectSpec(a.(*ProjectSpec), b.(*core.ProjectSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*SeedNetworks)(nil), (*core.SeedNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedNetworks_To_core_SeedNetworks(a.(*SeedNetworks), b.(*core.SeedNetworks), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*SeedSpec)(nil), (*core.SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_SeedSpec_To_core_SeedSpec(a.(*SeedSpec), b.(*core.SeedSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*Seed)(nil), (*core.Seed)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Seed_To_core_Seed(a.(*Seed), b.(*core.Seed), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*ShootStatus)(nil), (*core.ShootStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ShootStatus_To_core_ShootStatus(a.(*ShootStatus), b.(*core.ShootStatus), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1alpha1_Addon_To_core_Addon(in *Addon, out *core.Addon, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_v1alpha1_Addon_To_core_Addon is an autogenerated conversion function.
+func Convert_v1alpha1_Addon_To_core_Addon(in *Addon, out *core.Addon, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Addon_To_core_Addon(in, out, s)
+}
+
+func autoConvert_core_Addon_To_v1alpha1_Addon(in *core.Addon, out *Addon, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_core_Addon_To_v1alpha1_Addon is an autogenerated conversion function.
+func Convert_core_Addon_To_v1alpha1_Addon(in *core.Addon, out *Addon, s conversion.Scope) error {
+ return autoConvert_core_Addon_To_v1alpha1_Addon(in, out, s)
+}
+
+func autoConvert_v1alpha1_Addons_To_core_Addons(in *Addons, out *core.Addons, s conversion.Scope) error {
+ out.KubernetesDashboard = (*core.KubernetesDashboard)(unsafe.Pointer(in.KubernetesDashboard))
+ out.NginxIngress = (*core.NginxIngress)(unsafe.Pointer(in.NginxIngress))
+ return nil
+}
+
+// Convert_v1alpha1_Addons_To_core_Addons is an autogenerated conversion function.
+func Convert_v1alpha1_Addons_To_core_Addons(in *Addons, out *core.Addons, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Addons_To_core_Addons(in, out, s)
+}
+
+func autoConvert_core_Addons_To_v1alpha1_Addons(in *core.Addons, out *Addons, s conversion.Scope) error {
+ out.KubernetesDashboard = (*KubernetesDashboard)(unsafe.Pointer(in.KubernetesDashboard))
+ out.NginxIngress = (*NginxIngress)(unsafe.Pointer(in.NginxIngress))
+ return nil
+}
+
+// Convert_core_Addons_To_v1alpha1_Addons is an autogenerated conversion function.
+func Convert_core_Addons_To_v1alpha1_Addons(in *core.Addons, out *Addons, s conversion.Scope) error {
+ return autoConvert_core_Addons_To_v1alpha1_Addons(in, out, s)
+}
+
+func autoConvert_v1alpha1_AdmissionPlugin_To_core_AdmissionPlugin(in *AdmissionPlugin, out *core.AdmissionPlugin, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Config = (*runtime.RawExtension)(unsafe.Pointer(in.Config))
+ return nil
+}
+
+// Convert_v1alpha1_AdmissionPlugin_To_core_AdmissionPlugin is an autogenerated conversion function.
+func Convert_v1alpha1_AdmissionPlugin_To_core_AdmissionPlugin(in *AdmissionPlugin, out *core.AdmissionPlugin, s conversion.Scope) error {
+ return autoConvert_v1alpha1_AdmissionPlugin_To_core_AdmissionPlugin(in, out, s)
+}
+
+func autoConvert_core_AdmissionPlugin_To_v1alpha1_AdmissionPlugin(in *core.AdmissionPlugin, out *AdmissionPlugin, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Config = (*runtime.RawExtension)(unsafe.Pointer(in.Config))
+ return nil
+}
+
+// Convert_core_AdmissionPlugin_To_v1alpha1_AdmissionPlugin is an autogenerated conversion function.
+func Convert_core_AdmissionPlugin_To_v1alpha1_AdmissionPlugin(in *core.AdmissionPlugin, out *AdmissionPlugin, s conversion.Scope) error {
+ return autoConvert_core_AdmissionPlugin_To_v1alpha1_AdmissionPlugin(in, out, s)
+}
+
+func autoConvert_v1alpha1_Alerting_To_core_Alerting(in *Alerting, out *core.Alerting, s conversion.Scope) error {
+ out.EmailReceivers = *(*[]string)(unsafe.Pointer(&in.EmailReceivers))
+ return nil
+}
+
+// Convert_v1alpha1_Alerting_To_core_Alerting is an autogenerated conversion function.
+func Convert_v1alpha1_Alerting_To_core_Alerting(in *Alerting, out *core.Alerting, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Alerting_To_core_Alerting(in, out, s)
+}
+
+func autoConvert_core_Alerting_To_v1alpha1_Alerting(in *core.Alerting, out *Alerting, s conversion.Scope) error {
+ out.EmailReceivers = *(*[]string)(unsafe.Pointer(&in.EmailReceivers))
+ return nil
+}
+
+// Convert_core_Alerting_To_v1alpha1_Alerting is an autogenerated conversion function.
+func Convert_core_Alerting_To_v1alpha1_Alerting(in *core.Alerting, out *Alerting, s conversion.Scope) error {
+ return autoConvert_core_Alerting_To_v1alpha1_Alerting(in, out, s)
+}
+
+func autoConvert_v1alpha1_AuditConfig_To_core_AuditConfig(in *AuditConfig, out *core.AuditConfig, s conversion.Scope) error {
+ out.AuditPolicy = (*core.AuditPolicy)(unsafe.Pointer(in.AuditPolicy))
+ return nil
+}
+
+// Convert_v1alpha1_AuditConfig_To_core_AuditConfig is an autogenerated conversion function.
+func Convert_v1alpha1_AuditConfig_To_core_AuditConfig(in *AuditConfig, out *core.AuditConfig, s conversion.Scope) error {
+ return autoConvert_v1alpha1_AuditConfig_To_core_AuditConfig(in, out, s)
+}
+
+func autoConvert_core_AuditConfig_To_v1alpha1_AuditConfig(in *core.AuditConfig, out *AuditConfig, s conversion.Scope) error {
+ out.AuditPolicy = (*AuditPolicy)(unsafe.Pointer(in.AuditPolicy))
+ return nil
+}
+
+// Convert_core_AuditConfig_To_v1alpha1_AuditConfig is an autogenerated conversion function.
+func Convert_core_AuditConfig_To_v1alpha1_AuditConfig(in *core.AuditConfig, out *AuditConfig, s conversion.Scope) error {
+ return autoConvert_core_AuditConfig_To_v1alpha1_AuditConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_AuditPolicy_To_core_AuditPolicy(in *AuditPolicy, out *core.AuditPolicy, s conversion.Scope) error {
+ out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef))
+ return nil
+}
+
+// Convert_v1alpha1_AuditPolicy_To_core_AuditPolicy is an autogenerated conversion function.
+func Convert_v1alpha1_AuditPolicy_To_core_AuditPolicy(in *AuditPolicy, out *core.AuditPolicy, s conversion.Scope) error {
+ return autoConvert_v1alpha1_AuditPolicy_To_core_AuditPolicy(in, out, s)
+}
+
+func autoConvert_core_AuditPolicy_To_v1alpha1_AuditPolicy(in *core.AuditPolicy, out *AuditPolicy, s conversion.Scope) error {
+ out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef))
+ return nil
+}
+
+// Convert_core_AuditPolicy_To_v1alpha1_AuditPolicy is an autogenerated conversion function.
+func Convert_core_AuditPolicy_To_v1alpha1_AuditPolicy(in *core.AuditPolicy, out *AuditPolicy, s conversion.Scope) error {
+ return autoConvert_core_AuditPolicy_To_v1alpha1_AuditPolicy(in, out, s)
+}
+
+func autoConvert_v1alpha1_AvailabilityZone_To_core_AvailabilityZone(in *AvailabilityZone, out *core.AvailabilityZone, s conversion.Scope) error {
+ out.Name = in.Name
+ out.UnavailableMachineTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableMachineTypes))
+ out.UnavailableVolumeTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableVolumeTypes))
+ return nil
+}
+
+// Convert_v1alpha1_AvailabilityZone_To_core_AvailabilityZone is an autogenerated conversion function.
+func Convert_v1alpha1_AvailabilityZone_To_core_AvailabilityZone(in *AvailabilityZone, out *core.AvailabilityZone, s conversion.Scope) error {
+ return autoConvert_v1alpha1_AvailabilityZone_To_core_AvailabilityZone(in, out, s)
+}
+
+func autoConvert_core_AvailabilityZone_To_v1alpha1_AvailabilityZone(in *core.AvailabilityZone, out *AvailabilityZone, s conversion.Scope) error {
+ out.Name = in.Name
+ out.UnavailableMachineTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableMachineTypes))
+ out.UnavailableVolumeTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableVolumeTypes))
+ return nil
+}
+
+// Convert_core_AvailabilityZone_To_v1alpha1_AvailabilityZone is an autogenerated conversion function.
+func Convert_core_AvailabilityZone_To_v1alpha1_AvailabilityZone(in *core.AvailabilityZone, out *AvailabilityZone, s conversion.Scope) error {
+ return autoConvert_core_AvailabilityZone_To_v1alpha1_AvailabilityZone(in, out, s)
+}
+
+func autoConvert_v1alpha1_BackupBucket_To_core_BackupBucket(in *BackupBucket, out *core.BackupBucket, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_BackupBucketStatus_To_core_BackupBucketStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_core_BackupBucket_To_v1alpha1_BackupBucket(in *core.BackupBucket, out *BackupBucket, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_BackupBucketSpec_To_v1alpha1_BackupBucketSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_BackupBucketStatus_To_v1alpha1_BackupBucketStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1alpha1_BackupBucketList_To_core_BackupBucketList(in *BackupBucketList, out *core.BackupBucketList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]core.BackupBucket, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_BackupBucket_To_core_BackupBucket(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1alpha1_BackupBucketList_To_core_BackupBucketList is an autogenerated conversion function.
+func Convert_v1alpha1_BackupBucketList_To_core_BackupBucketList(in *BackupBucketList, out *core.BackupBucketList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_BackupBucketList_To_core_BackupBucketList(in, out, s)
+}
+
+func autoConvert_core_BackupBucketList_To_v1alpha1_BackupBucketList(in *core.BackupBucketList, out *BackupBucketList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]BackupBucket, len(*in))
+ for i := range *in {
+ if err := Convert_core_BackupBucket_To_v1alpha1_BackupBucket(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_core_BackupBucketList_To_v1alpha1_BackupBucketList is an autogenerated conversion function.
+func Convert_core_BackupBucketList_To_v1alpha1_BackupBucketList(in *core.BackupBucketList, out *BackupBucketList, s conversion.Scope) error {
+ return autoConvert_core_BackupBucketList_To_v1alpha1_BackupBucketList(in, out, s)
+}
+
+func autoConvert_v1alpha1_BackupBucketProvider_To_core_BackupBucketProvider(in *BackupBucketProvider, out *core.BackupBucketProvider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.Region = in.Region
+ return nil
+}
+
+// Convert_v1alpha1_BackupBucketProvider_To_core_BackupBucketProvider is an autogenerated conversion function.
+func Convert_v1alpha1_BackupBucketProvider_To_core_BackupBucketProvider(in *BackupBucketProvider, out *core.BackupBucketProvider, s conversion.Scope) error {
+ return autoConvert_v1alpha1_BackupBucketProvider_To_core_BackupBucketProvider(in, out, s)
+}
+
+func autoConvert_core_BackupBucketProvider_To_v1alpha1_BackupBucketProvider(in *core.BackupBucketProvider, out *BackupBucketProvider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.Region = in.Region
+ return nil
+}
+
+// Convert_core_BackupBucketProvider_To_v1alpha1_BackupBucketProvider is an autogenerated conversion function.
+func Convert_core_BackupBucketProvider_To_v1alpha1_BackupBucketProvider(in *core.BackupBucketProvider, out *BackupBucketProvider, s conversion.Scope) error {
+ return autoConvert_core_BackupBucketProvider_To_v1alpha1_BackupBucketProvider(in, out, s)
+}
+
+func autoConvert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(in *BackupBucketSpec, out *core.BackupBucketSpec, s conversion.Scope) error {
+ if err := Convert_v1alpha1_BackupBucketProvider_To_core_BackupBucketProvider(&in.Provider, &out.Provider, s); err != nil {
+ return err
+ }
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.SecretRef = in.SecretRef
+ // WARNING: in.Seed requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_core_BackupBucketSpec_To_v1alpha1_BackupBucketSpec(in *core.BackupBucketSpec, out *BackupBucketSpec, s conversion.Scope) error {
+ if err := Convert_core_BackupBucketProvider_To_v1alpha1_BackupBucketProvider(&in.Provider, &out.Provider, s); err != nil {
+ return err
+ }
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.SecretRef = in.SecretRef
+ // WARNING: in.SeedName requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1alpha1_BackupBucketStatus_To_core_BackupBucketStatus(in *BackupBucketStatus, out *core.BackupBucketStatus, s conversion.Scope) error {
+ out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus))
+ out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation))
+ out.LastError = (*core.LastError)(unsafe.Pointer(in.LastError))
+ out.ObservedGeneration = in.ObservedGeneration
+ out.GeneratedSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.GeneratedSecretRef))
+ return nil
+}
+
+// Convert_v1alpha1_BackupBucketStatus_To_core_BackupBucketStatus is an autogenerated conversion function.
+func Convert_v1alpha1_BackupBucketStatus_To_core_BackupBucketStatus(in *BackupBucketStatus, out *core.BackupBucketStatus, s conversion.Scope) error {
+ return autoConvert_v1alpha1_BackupBucketStatus_To_core_BackupBucketStatus(in, out, s)
+}
+
+func autoConvert_core_BackupBucketStatus_To_v1alpha1_BackupBucketStatus(in *core.BackupBucketStatus, out *BackupBucketStatus, s conversion.Scope) error {
+ out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus))
+ out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation))
+ out.LastError = (*LastError)(unsafe.Pointer(in.LastError))
+ out.ObservedGeneration = in.ObservedGeneration
+ out.GeneratedSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.GeneratedSecretRef))
+ return nil
+}
+
+// Convert_core_BackupBucketStatus_To_v1alpha1_BackupBucketStatus is an autogenerated conversion function.
+func Convert_core_BackupBucketStatus_To_v1alpha1_BackupBucketStatus(in *core.BackupBucketStatus, out *BackupBucketStatus, s conversion.Scope) error {
+ return autoConvert_core_BackupBucketStatus_To_v1alpha1_BackupBucketStatus(in, out, s)
+}
+
+func autoConvert_v1alpha1_BackupEntry_To_core_BackupEntry(in *BackupEntry, out *core.BackupEntry, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1alpha1_BackupEntrySpec_To_core_BackupEntrySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_BackupEntryStatus_To_core_BackupEntryStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_core_BackupEntry_To_v1alpha1_BackupEntry(in *core.BackupEntry, out *BackupEntry, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_BackupEntrySpec_To_v1alpha1_BackupEntrySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_BackupEntryStatus_To_v1alpha1_BackupEntryStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1alpha1_BackupEntryList_To_core_BackupEntryList(in *BackupEntryList, out *core.BackupEntryList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]core.BackupEntry, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_BackupEntry_To_core_BackupEntry(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1alpha1_BackupEntryList_To_core_BackupEntryList is an autogenerated conversion function.
+func Convert_v1alpha1_BackupEntryList_To_core_BackupEntryList(in *BackupEntryList, out *core.BackupEntryList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_BackupEntryList_To_core_BackupEntryList(in, out, s)
+}
+
+func autoConvert_core_BackupEntryList_To_v1alpha1_BackupEntryList(in *core.BackupEntryList, out *BackupEntryList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]BackupEntry, len(*in))
+ for i := range *in {
+ if err := Convert_core_BackupEntry_To_v1alpha1_BackupEntry(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_core_BackupEntryList_To_v1alpha1_BackupEntryList is an autogenerated conversion function.
+func Convert_core_BackupEntryList_To_v1alpha1_BackupEntryList(in *core.BackupEntryList, out *BackupEntryList, s conversion.Scope) error {
+ return autoConvert_core_BackupEntryList_To_v1alpha1_BackupEntryList(in, out, s)
+}
+
+func autoConvert_v1alpha1_BackupEntrySpec_To_core_BackupEntrySpec(in *BackupEntrySpec, out *core.BackupEntrySpec, s conversion.Scope) error {
+ out.BucketName = in.BucketName
+ // WARNING: in.Seed requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_core_BackupEntrySpec_To_v1alpha1_BackupEntrySpec(in *core.BackupEntrySpec, out *BackupEntrySpec, s conversion.Scope) error {
+ out.BucketName = in.BucketName
+ // WARNING: in.SeedName requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1alpha1_BackupEntryStatus_To_core_BackupEntryStatus(in *BackupEntryStatus, out *core.BackupEntryStatus, s conversion.Scope) error {
+ out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation))
+ out.LastError = (*core.LastError)(unsafe.Pointer(in.LastError))
+ out.ObservedGeneration = in.ObservedGeneration
+ return nil
+}
+
+// Convert_v1alpha1_BackupEntryStatus_To_core_BackupEntryStatus is an autogenerated conversion function.
+func Convert_v1alpha1_BackupEntryStatus_To_core_BackupEntryStatus(in *BackupEntryStatus, out *core.BackupEntryStatus, s conversion.Scope) error {
+ return autoConvert_v1alpha1_BackupEntryStatus_To_core_BackupEntryStatus(in, out, s)
+}
+
+func autoConvert_core_BackupEntryStatus_To_v1alpha1_BackupEntryStatus(in *core.BackupEntryStatus, out *BackupEntryStatus, s conversion.Scope) error {
+ out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation))
+ out.LastError = (*LastError)(unsafe.Pointer(in.LastError))
+ out.ObservedGeneration = in.ObservedGeneration
+ return nil
+}
+
+// Convert_core_BackupEntryStatus_To_v1alpha1_BackupEntryStatus is an autogenerated conversion function.
+func Convert_core_BackupEntryStatus_To_v1alpha1_BackupEntryStatus(in *core.BackupEntryStatus, out *BackupEntryStatus, s conversion.Scope) error {
+ return autoConvert_core_BackupEntryStatus_To_v1alpha1_BackupEntryStatus(in, out, s)
+}
+
+func autoConvert_v1alpha1_CRI_To_core_CRI(in *CRI, out *core.CRI, s conversion.Scope) error {
+ out.Name = core.CRIName(in.Name)
+ out.ContainerRuntimes = *(*[]core.ContainerRuntime)(unsafe.Pointer(&in.ContainerRuntimes))
+ return nil
+}
+
+// Convert_v1alpha1_CRI_To_core_CRI is an autogenerated conversion function.
+func Convert_v1alpha1_CRI_To_core_CRI(in *CRI, out *core.CRI, s conversion.Scope) error {
+ return autoConvert_v1alpha1_CRI_To_core_CRI(in, out, s)
+}
+
+func autoConvert_core_CRI_To_v1alpha1_CRI(in *core.CRI, out *CRI, s conversion.Scope) error {
+ out.Name = CRIName(in.Name)
+ out.ContainerRuntimes = *(*[]ContainerRuntime)(unsafe.Pointer(&in.ContainerRuntimes))
+ return nil
+}
+
+// Convert_core_CRI_To_v1alpha1_CRI is an autogenerated conversion function.
+func Convert_core_CRI_To_v1alpha1_CRI(in *core.CRI, out *CRI, s conversion.Scope) error {
+ return autoConvert_core_CRI_To_v1alpha1_CRI(in, out, s)
+}
+
+func autoConvert_v1alpha1_CloudInfo_To_core_CloudInfo(in *CloudInfo, out *core.CloudInfo, s conversion.Scope) error {
+ out.Type = in.Type
+ out.Region = in.Region
+ return nil
+}
+
+// Convert_v1alpha1_CloudInfo_To_core_CloudInfo is an autogenerated conversion function.
+func Convert_v1alpha1_CloudInfo_To_core_CloudInfo(in *CloudInfo, out *core.CloudInfo, s conversion.Scope) error {
+ return autoConvert_v1alpha1_CloudInfo_To_core_CloudInfo(in, out, s)
+}
+
+func autoConvert_core_CloudInfo_To_v1alpha1_CloudInfo(in *core.CloudInfo, out *CloudInfo, s conversion.Scope) error {
+ out.Type = in.Type
+ out.Region = in.Region
+ return nil
+}
+
+// Convert_core_CloudInfo_To_v1alpha1_CloudInfo is an autogenerated conversion function.
+func Convert_core_CloudInfo_To_v1alpha1_CloudInfo(in *core.CloudInfo, out *CloudInfo, s conversion.Scope) error {
+ return autoConvert_core_CloudInfo_To_v1alpha1_CloudInfo(in, out, s)
+}
+
+func autoConvert_v1alpha1_CloudProfile_To_core_CloudProfile(in *CloudProfile, out *core.CloudProfile, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1alpha1_CloudProfileSpec_To_core_CloudProfileSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1alpha1_CloudProfile_To_core_CloudProfile is an autogenerated conversion function.
+func Convert_v1alpha1_CloudProfile_To_core_CloudProfile(in *CloudProfile, out *core.CloudProfile, s conversion.Scope) error {
+ return autoConvert_v1alpha1_CloudProfile_To_core_CloudProfile(in, out, s)
+}
+
+func autoConvert_core_CloudProfile_To_v1alpha1_CloudProfile(in *core.CloudProfile, out *CloudProfile, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_CloudProfileSpec_To_v1alpha1_CloudProfileSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_CloudProfile_To_v1alpha1_CloudProfile is an autogenerated conversion function.
+func Convert_core_CloudProfile_To_v1alpha1_CloudProfile(in *core.CloudProfile, out *CloudProfile, s conversion.Scope) error {
+ return autoConvert_core_CloudProfile_To_v1alpha1_CloudProfile(in, out, s)
+}
+
+func autoConvert_v1alpha1_CloudProfileList_To_core_CloudProfileList(in *CloudProfileList, out *core.CloudProfileList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.CloudProfile)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1alpha1_CloudProfileList_To_core_CloudProfileList is an autogenerated conversion function.
+func Convert_v1alpha1_CloudProfileList_To_core_CloudProfileList(in *CloudProfileList, out *core.CloudProfileList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_CloudProfileList_To_core_CloudProfileList(in, out, s)
+}
+
+func autoConvert_core_CloudProfileList_To_v1alpha1_CloudProfileList(in *core.CloudProfileList, out *CloudProfileList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]CloudProfile)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_CloudProfileList_To_v1alpha1_CloudProfileList is an autogenerated conversion function.
+func Convert_core_CloudProfileList_To_v1alpha1_CloudProfileList(in *core.CloudProfileList, out *CloudProfileList, s conversion.Scope) error {
+ return autoConvert_core_CloudProfileList_To_v1alpha1_CloudProfileList(in, out, s)
+}
+
+func autoConvert_v1alpha1_CloudProfileSpec_To_core_CloudProfileSpec(in *CloudProfileSpec, out *core.CloudProfileSpec, s conversion.Scope) error {
+ out.CABundle = (*string)(unsafe.Pointer(in.CABundle))
+ if err := Convert_v1alpha1_KubernetesSettings_To_core_KubernetesSettings(&in.Kubernetes, &out.Kubernetes, s); err != nil {
+ return err
+ }
+ out.MachineImages = *(*[]core.MachineImage)(unsafe.Pointer(&in.MachineImages))
+ out.MachineTypes = *(*[]core.MachineType)(unsafe.Pointer(&in.MachineTypes))
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Regions = *(*[]core.Region)(unsafe.Pointer(&in.Regions))
+ out.SeedSelector = (*core.SeedSelector)(unsafe.Pointer(in.SeedSelector))
+ out.Type = in.Type
+ out.VolumeTypes = *(*[]core.VolumeType)(unsafe.Pointer(&in.VolumeTypes))
+ return nil
+}
+
+// Convert_v1alpha1_CloudProfileSpec_To_core_CloudProfileSpec is an autogenerated conversion function.
+func Convert_v1alpha1_CloudProfileSpec_To_core_CloudProfileSpec(in *CloudProfileSpec, out *core.CloudProfileSpec, s conversion.Scope) error {
+ return autoConvert_v1alpha1_CloudProfileSpec_To_core_CloudProfileSpec(in, out, s)
+}
+
+func autoConvert_core_CloudProfileSpec_To_v1alpha1_CloudProfileSpec(in *core.CloudProfileSpec, out *CloudProfileSpec, s conversion.Scope) error {
+ out.CABundle = (*string)(unsafe.Pointer(in.CABundle))
+ if err := Convert_core_KubernetesSettings_To_v1alpha1_KubernetesSettings(&in.Kubernetes, &out.Kubernetes, s); err != nil {
+ return err
+ }
+ out.MachineImages = *(*[]MachineImage)(unsafe.Pointer(&in.MachineImages))
+ out.MachineTypes = *(*[]MachineType)(unsafe.Pointer(&in.MachineTypes))
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Regions = *(*[]Region)(unsafe.Pointer(&in.Regions))
+ out.SeedSelector = (*SeedSelector)(unsafe.Pointer(in.SeedSelector))
+ out.Type = in.Type
+ out.VolumeTypes = *(*[]VolumeType)(unsafe.Pointer(&in.VolumeTypes))
+ return nil
+}
+
+// Convert_core_CloudProfileSpec_To_v1alpha1_CloudProfileSpec is an autogenerated conversion function.
+func Convert_core_CloudProfileSpec_To_v1alpha1_CloudProfileSpec(in *core.CloudProfileSpec, out *CloudProfileSpec, s conversion.Scope) error {
+ return autoConvert_core_CloudProfileSpec_To_v1alpha1_CloudProfileSpec(in, out, s)
+}
+
+func autoConvert_v1alpha1_ClusterAutoscaler_To_core_ClusterAutoscaler(in *ClusterAutoscaler, out *core.ClusterAutoscaler, s conversion.Scope) error {
+ out.ScaleDownDelayAfterAdd = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterAdd))
+ out.ScaleDownDelayAfterDelete = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterDelete))
+ out.ScaleDownDelayAfterFailure = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterFailure))
+ out.ScaleDownUnneededTime = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownUnneededTime))
+ out.ScaleDownUtilizationThreshold = (*float64)(unsafe.Pointer(in.ScaleDownUtilizationThreshold))
+ out.ScanInterval = (*metav1.Duration)(unsafe.Pointer(in.ScanInterval))
+ return nil
+}
+
+// Convert_v1alpha1_ClusterAutoscaler_To_core_ClusterAutoscaler is an autogenerated conversion function.
+func Convert_v1alpha1_ClusterAutoscaler_To_core_ClusterAutoscaler(in *ClusterAutoscaler, out *core.ClusterAutoscaler, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ClusterAutoscaler_To_core_ClusterAutoscaler(in, out, s)
+}
+
+func autoConvert_core_ClusterAutoscaler_To_v1alpha1_ClusterAutoscaler(in *core.ClusterAutoscaler, out *ClusterAutoscaler, s conversion.Scope) error {
+ out.ScaleDownDelayAfterAdd = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterAdd))
+ out.ScaleDownDelayAfterDelete = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterDelete))
+ out.ScaleDownDelayAfterFailure = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterFailure))
+ out.ScaleDownUnneededTime = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownUnneededTime))
+ out.ScaleDownUtilizationThreshold = (*float64)(unsafe.Pointer(in.ScaleDownUtilizationThreshold))
+ out.ScanInterval = (*metav1.Duration)(unsafe.Pointer(in.ScanInterval))
+ return nil
+}
+
+// Convert_core_ClusterAutoscaler_To_v1alpha1_ClusterAutoscaler is an autogenerated conversion function.
+func Convert_core_ClusterAutoscaler_To_v1alpha1_ClusterAutoscaler(in *core.ClusterAutoscaler, out *ClusterAutoscaler, s conversion.Scope) error {
+ return autoConvert_core_ClusterAutoscaler_To_v1alpha1_ClusterAutoscaler(in, out, s)
+}
+
+func autoConvert_v1alpha1_ClusterInfo_To_core_ClusterInfo(in *ClusterInfo, out *core.ClusterInfo, s conversion.Scope) error {
+ if err := Convert_v1alpha1_CloudInfo_To_core_CloudInfo(&in.Cloud, &out.Cloud, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_KubernetesInfo_To_core_KubernetesInfo(&in.Kubernetes, &out.Kubernetes, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1alpha1_ClusterInfo_To_core_ClusterInfo is an autogenerated conversion function.
+func Convert_v1alpha1_ClusterInfo_To_core_ClusterInfo(in *ClusterInfo, out *core.ClusterInfo, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ClusterInfo_To_core_ClusterInfo(in, out, s)
+}
+
+func autoConvert_core_ClusterInfo_To_v1alpha1_ClusterInfo(in *core.ClusterInfo, out *ClusterInfo, s conversion.Scope) error {
+ if err := Convert_core_CloudInfo_To_v1alpha1_CloudInfo(&in.Cloud, &out.Cloud, s); err != nil {
+ return err
+ }
+ if err := Convert_core_KubernetesInfo_To_v1alpha1_KubernetesInfo(&in.Kubernetes, &out.Kubernetes, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_ClusterInfo_To_v1alpha1_ClusterInfo is an autogenerated conversion function.
+func Convert_core_ClusterInfo_To_v1alpha1_ClusterInfo(in *core.ClusterInfo, out *ClusterInfo, s conversion.Scope) error {
+ return autoConvert_core_ClusterInfo_To_v1alpha1_ClusterInfo(in, out, s)
+}
+
+func autoConvert_v1alpha1_Condition_To_core_Condition(in *Condition, out *core.Condition, s conversion.Scope) error {
+ out.Type = core.ConditionType(in.Type)
+ out.Status = core.ConditionStatus(in.Status)
+ out.LastTransitionTime = in.LastTransitionTime
+ out.LastUpdateTime = in.LastUpdateTime
+ out.Reason = in.Reason
+ out.Message = in.Message
+ out.Codes = *(*[]core.ErrorCode)(unsafe.Pointer(&in.Codes))
+ return nil
+}
+
+// Convert_v1alpha1_Condition_To_core_Condition is an autogenerated conversion function.
+func Convert_v1alpha1_Condition_To_core_Condition(in *Condition, out *core.Condition, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Condition_To_core_Condition(in, out, s)
+}
+
+func autoConvert_core_Condition_To_v1alpha1_Condition(in *core.Condition, out *Condition, s conversion.Scope) error {
+ out.Type = ConditionType(in.Type)
+ out.Status = ConditionStatus(in.Status)
+ out.LastTransitionTime = in.LastTransitionTime
+ out.LastUpdateTime = in.LastUpdateTime
+ out.Reason = in.Reason
+ out.Message = in.Message
+ out.Codes = *(*[]ErrorCode)(unsafe.Pointer(&in.Codes))
+ return nil
+}
+
+// Convert_core_Condition_To_v1alpha1_Condition is an autogenerated conversion function.
+func Convert_core_Condition_To_v1alpha1_Condition(in *core.Condition, out *Condition, s conversion.Scope) error {
+ return autoConvert_core_Condition_To_v1alpha1_Condition(in, out, s)
+}
+
+func autoConvert_v1alpha1_ContainerRuntime_To_core_ContainerRuntime(in *ContainerRuntime, out *core.ContainerRuntime, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ return nil
+}
+
+// Convert_v1alpha1_ContainerRuntime_To_core_ContainerRuntime is an autogenerated conversion function.
+func Convert_v1alpha1_ContainerRuntime_To_core_ContainerRuntime(in *ContainerRuntime, out *core.ContainerRuntime, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ContainerRuntime_To_core_ContainerRuntime(in, out, s)
+}
+
+func autoConvert_core_ContainerRuntime_To_v1alpha1_ContainerRuntime(in *core.ContainerRuntime, out *ContainerRuntime, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ return nil
+}
+
+// Convert_core_ContainerRuntime_To_v1alpha1_ContainerRuntime is an autogenerated conversion function.
+func Convert_core_ContainerRuntime_To_v1alpha1_ContainerRuntime(in *core.ContainerRuntime, out *ContainerRuntime, s conversion.Scope) error {
+ return autoConvert_core_ContainerRuntime_To_v1alpha1_ContainerRuntime(in, out, s)
+}
+
+func autoConvert_v1alpha1_ControllerDeployment_To_core_ControllerDeployment(in *ControllerDeployment, out *core.ControllerDeployment, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Policy = (*core.ControllerDeploymentPolicy)(unsafe.Pointer(in.Policy))
+ out.SeedSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.SeedSelector))
+ return nil
+}
+
+// Convert_v1alpha1_ControllerDeployment_To_core_ControllerDeployment is an autogenerated conversion function.
+func Convert_v1alpha1_ControllerDeployment_To_core_ControllerDeployment(in *ControllerDeployment, out *core.ControllerDeployment, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ControllerDeployment_To_core_ControllerDeployment(in, out, s)
+}
+
+func autoConvert_core_ControllerDeployment_To_v1alpha1_ControllerDeployment(in *core.ControllerDeployment, out *ControllerDeployment, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Policy = (*ControllerDeploymentPolicy)(unsafe.Pointer(in.Policy))
+ out.SeedSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.SeedSelector))
+ return nil
+}
+
+// Convert_core_ControllerDeployment_To_v1alpha1_ControllerDeployment is an autogenerated conversion function.
+func Convert_core_ControllerDeployment_To_v1alpha1_ControllerDeployment(in *core.ControllerDeployment, out *ControllerDeployment, s conversion.Scope) error {
+ return autoConvert_core_ControllerDeployment_To_v1alpha1_ControllerDeployment(in, out, s)
+}
+
+func autoConvert_v1alpha1_ControllerInstallation_To_core_ControllerInstallation(in *ControllerInstallation, out *core.ControllerInstallation, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1alpha1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1alpha1_ControllerInstallation_To_core_ControllerInstallation is an autogenerated conversion function.
+func Convert_v1alpha1_ControllerInstallation_To_core_ControllerInstallation(in *ControllerInstallation, out *core.ControllerInstallation, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ControllerInstallation_To_core_ControllerInstallation(in, out, s)
+}
+
+func autoConvert_core_ControllerInstallation_To_v1alpha1_ControllerInstallation(in *core.ControllerInstallation, out *ControllerInstallation, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_ControllerInstallationSpec_To_v1alpha1_ControllerInstallationSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_ControllerInstallationStatus_To_v1alpha1_ControllerInstallationStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_ControllerInstallation_To_v1alpha1_ControllerInstallation is an autogenerated conversion function.
+func Convert_core_ControllerInstallation_To_v1alpha1_ControllerInstallation(in *core.ControllerInstallation, out *ControllerInstallation, s conversion.Scope) error {
+ return autoConvert_core_ControllerInstallation_To_v1alpha1_ControllerInstallation(in, out, s)
+}
+
+func autoConvert_v1alpha1_ControllerInstallationList_To_core_ControllerInstallationList(in *ControllerInstallationList, out *core.ControllerInstallationList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.ControllerInstallation)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1alpha1_ControllerInstallationList_To_core_ControllerInstallationList is an autogenerated conversion function.
+func Convert_v1alpha1_ControllerInstallationList_To_core_ControllerInstallationList(in *ControllerInstallationList, out *core.ControllerInstallationList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ControllerInstallationList_To_core_ControllerInstallationList(in, out, s)
+}
+
+func autoConvert_core_ControllerInstallationList_To_v1alpha1_ControllerInstallationList(in *core.ControllerInstallationList, out *ControllerInstallationList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]ControllerInstallation)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_ControllerInstallationList_To_v1alpha1_ControllerInstallationList is an autogenerated conversion function.
+func Convert_core_ControllerInstallationList_To_v1alpha1_ControllerInstallationList(in *core.ControllerInstallationList, out *ControllerInstallationList, s conversion.Scope) error {
+ return autoConvert_core_ControllerInstallationList_To_v1alpha1_ControllerInstallationList(in, out, s)
+}
+
+func autoConvert_v1alpha1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(in *ControllerInstallationSpec, out *core.ControllerInstallationSpec, s conversion.Scope) error {
+ out.RegistrationRef = in.RegistrationRef
+ out.SeedRef = in.SeedRef
+ return nil
+}
+
+// Convert_v1alpha1_ControllerInstallationSpec_To_core_ControllerInstallationSpec is an autogenerated conversion function.
+func Convert_v1alpha1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(in *ControllerInstallationSpec, out *core.ControllerInstallationSpec, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(in, out, s)
+}
+
+func autoConvert_core_ControllerInstallationSpec_To_v1alpha1_ControllerInstallationSpec(in *core.ControllerInstallationSpec, out *ControllerInstallationSpec, s conversion.Scope) error {
+ out.RegistrationRef = in.RegistrationRef
+ out.SeedRef = in.SeedRef
+ return nil
+}
+
+// Convert_core_ControllerInstallationSpec_To_v1alpha1_ControllerInstallationSpec is an autogenerated conversion function.
+func Convert_core_ControllerInstallationSpec_To_v1alpha1_ControllerInstallationSpec(in *core.ControllerInstallationSpec, out *ControllerInstallationSpec, s conversion.Scope) error {
+ return autoConvert_core_ControllerInstallationSpec_To_v1alpha1_ControllerInstallationSpec(in, out, s)
+}
+
+func autoConvert_v1alpha1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in *ControllerInstallationStatus, out *core.ControllerInstallationStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions))
+ out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus))
+ return nil
+}
+
+// Convert_v1alpha1_ControllerInstallationStatus_To_core_ControllerInstallationStatus is an autogenerated conversion function.
+func Convert_v1alpha1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in *ControllerInstallationStatus, out *core.ControllerInstallationStatus, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in, out, s)
+}
+
+func autoConvert_core_ControllerInstallationStatus_To_v1alpha1_ControllerInstallationStatus(in *core.ControllerInstallationStatus, out *ControllerInstallationStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions))
+ out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus))
+ return nil
+}
+
+// Convert_core_ControllerInstallationStatus_To_v1alpha1_ControllerInstallationStatus is an autogenerated conversion function.
+func Convert_core_ControllerInstallationStatus_To_v1alpha1_ControllerInstallationStatus(in *core.ControllerInstallationStatus, out *ControllerInstallationStatus, s conversion.Scope) error {
+ return autoConvert_core_ControllerInstallationStatus_To_v1alpha1_ControllerInstallationStatus(in, out, s)
+}
+
+func autoConvert_v1alpha1_ControllerRegistration_To_core_ControllerRegistration(in *ControllerRegistration, out *core.ControllerRegistration, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1alpha1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1alpha1_ControllerRegistration_To_core_ControllerRegistration is an autogenerated conversion function.
+func Convert_v1alpha1_ControllerRegistration_To_core_ControllerRegistration(in *ControllerRegistration, out *core.ControllerRegistration, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ControllerRegistration_To_core_ControllerRegistration(in, out, s)
+}
+
+func autoConvert_core_ControllerRegistration_To_v1alpha1_ControllerRegistration(in *core.ControllerRegistration, out *ControllerRegistration, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_ControllerRegistrationSpec_To_v1alpha1_ControllerRegistrationSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_ControllerRegistration_To_v1alpha1_ControllerRegistration is an autogenerated conversion function.
+func Convert_core_ControllerRegistration_To_v1alpha1_ControllerRegistration(in *core.ControllerRegistration, out *ControllerRegistration, s conversion.Scope) error {
+ return autoConvert_core_ControllerRegistration_To_v1alpha1_ControllerRegistration(in, out, s)
+}
+
+func autoConvert_v1alpha1_ControllerRegistrationList_To_core_ControllerRegistrationList(in *ControllerRegistrationList, out *core.ControllerRegistrationList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.ControllerRegistration)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1alpha1_ControllerRegistrationList_To_core_ControllerRegistrationList is an autogenerated conversion function.
+func Convert_v1alpha1_ControllerRegistrationList_To_core_ControllerRegistrationList(in *ControllerRegistrationList, out *core.ControllerRegistrationList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ControllerRegistrationList_To_core_ControllerRegistrationList(in, out, s)
+}
+
+func autoConvert_core_ControllerRegistrationList_To_v1alpha1_ControllerRegistrationList(in *core.ControllerRegistrationList, out *ControllerRegistrationList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]ControllerRegistration)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_ControllerRegistrationList_To_v1alpha1_ControllerRegistrationList is an autogenerated conversion function.
+func Convert_core_ControllerRegistrationList_To_v1alpha1_ControllerRegistrationList(in *core.ControllerRegistrationList, out *ControllerRegistrationList, s conversion.Scope) error {
+ return autoConvert_core_ControllerRegistrationList_To_v1alpha1_ControllerRegistrationList(in, out, s)
+}
+
+func autoConvert_v1alpha1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(in *ControllerRegistrationSpec, out *core.ControllerRegistrationSpec, s conversion.Scope) error {
+ out.Resources = *(*[]core.ControllerResource)(unsafe.Pointer(&in.Resources))
+ out.Deployment = (*core.ControllerDeployment)(unsafe.Pointer(in.Deployment))
+ return nil
+}
+
+// Convert_v1alpha1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec is an autogenerated conversion function.
+func Convert_v1alpha1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(in *ControllerRegistrationSpec, out *core.ControllerRegistrationSpec, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(in, out, s)
+}
+
+func autoConvert_core_ControllerRegistrationSpec_To_v1alpha1_ControllerRegistrationSpec(in *core.ControllerRegistrationSpec, out *ControllerRegistrationSpec, s conversion.Scope) error {
+ out.Resources = *(*[]ControllerResource)(unsafe.Pointer(&in.Resources))
+ out.Deployment = (*ControllerDeployment)(unsafe.Pointer(in.Deployment))
+ return nil
+}
+
+// Convert_core_ControllerRegistrationSpec_To_v1alpha1_ControllerRegistrationSpec is an autogenerated conversion function.
+func Convert_core_ControllerRegistrationSpec_To_v1alpha1_ControllerRegistrationSpec(in *core.ControllerRegistrationSpec, out *ControllerRegistrationSpec, s conversion.Scope) error {
+ return autoConvert_core_ControllerRegistrationSpec_To_v1alpha1_ControllerRegistrationSpec(in, out, s)
+}
+
+func autoConvert_v1alpha1_ControllerResource_To_core_ControllerResource(in *ControllerResource, out *core.ControllerResource, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.Type = in.Type
+ out.GloballyEnabled = (*bool)(unsafe.Pointer(in.GloballyEnabled))
+ out.ReconcileTimeout = (*metav1.Duration)(unsafe.Pointer(in.ReconcileTimeout))
+ out.Primary = (*bool)(unsafe.Pointer(in.Primary))
+ return nil
+}
+
+// Convert_v1alpha1_ControllerResource_To_core_ControllerResource is an autogenerated conversion function.
+func Convert_v1alpha1_ControllerResource_To_core_ControllerResource(in *ControllerResource, out *core.ControllerResource, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ControllerResource_To_core_ControllerResource(in, out, s)
+}
+
+func autoConvert_core_ControllerResource_To_v1alpha1_ControllerResource(in *core.ControllerResource, out *ControllerResource, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.Type = in.Type
+ out.GloballyEnabled = (*bool)(unsafe.Pointer(in.GloballyEnabled))
+ out.ReconcileTimeout = (*metav1.Duration)(unsafe.Pointer(in.ReconcileTimeout))
+ out.Primary = (*bool)(unsafe.Pointer(in.Primary))
+ return nil
+}
+
+// Convert_core_ControllerResource_To_v1alpha1_ControllerResource is an autogenerated conversion function.
+func Convert_core_ControllerResource_To_v1alpha1_ControllerResource(in *core.ControllerResource, out *ControllerResource, s conversion.Scope) error {
+ return autoConvert_core_ControllerResource_To_v1alpha1_ControllerResource(in, out, s)
+}
+
+func autoConvert_v1alpha1_DNS_To_core_DNS(in *DNS, out *core.DNS, s conversion.Scope) error {
+ out.Domain = (*string)(unsafe.Pointer(in.Domain))
+ out.Providers = *(*[]core.DNSProvider)(unsafe.Pointer(&in.Providers))
+ return nil
+}
+
+// Convert_v1alpha1_DNS_To_core_DNS is an autogenerated conversion function.
+func Convert_v1alpha1_DNS_To_core_DNS(in *DNS, out *core.DNS, s conversion.Scope) error {
+ return autoConvert_v1alpha1_DNS_To_core_DNS(in, out, s)
+}
+
+func autoConvert_core_DNS_To_v1alpha1_DNS(in *core.DNS, out *DNS, s conversion.Scope) error {
+ out.Domain = (*string)(unsafe.Pointer(in.Domain))
+ out.Providers = *(*[]DNSProvider)(unsafe.Pointer(&in.Providers))
+ return nil
+}
+
+// Convert_core_DNS_To_v1alpha1_DNS is an autogenerated conversion function.
+func Convert_core_DNS_To_v1alpha1_DNS(in *core.DNS, out *DNS, s conversion.Scope) error {
+ return autoConvert_core_DNS_To_v1alpha1_DNS(in, out, s)
+}
+
+func autoConvert_v1alpha1_DNSIncludeExclude_To_core_DNSIncludeExclude(in *DNSIncludeExclude, out *core.DNSIncludeExclude, s conversion.Scope) error {
+ out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
+ out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
+ return nil
+}
+
+// Convert_v1alpha1_DNSIncludeExclude_To_core_DNSIncludeExclude is an autogenerated conversion function.
+func Convert_v1alpha1_DNSIncludeExclude_To_core_DNSIncludeExclude(in *DNSIncludeExclude, out *core.DNSIncludeExclude, s conversion.Scope) error {
+ return autoConvert_v1alpha1_DNSIncludeExclude_To_core_DNSIncludeExclude(in, out, s)
+}
+
+func autoConvert_core_DNSIncludeExclude_To_v1alpha1_DNSIncludeExclude(in *core.DNSIncludeExclude, out *DNSIncludeExclude, s conversion.Scope) error {
+ out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
+ out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
+ return nil
+}
+
+// Convert_core_DNSIncludeExclude_To_v1alpha1_DNSIncludeExclude is an autogenerated conversion function.
+func Convert_core_DNSIncludeExclude_To_v1alpha1_DNSIncludeExclude(in *core.DNSIncludeExclude, out *DNSIncludeExclude, s conversion.Scope) error {
+ return autoConvert_core_DNSIncludeExclude_To_v1alpha1_DNSIncludeExclude(in, out, s)
+}
+
+func autoConvert_v1alpha1_DNSProvider_To_core_DNSProvider(in *DNSProvider, out *core.DNSProvider, s conversion.Scope) error {
+ out.Domains = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Domains))
+ out.Primary = (*bool)(unsafe.Pointer(in.Primary))
+ out.SecretName = (*string)(unsafe.Pointer(in.SecretName))
+ out.Type = (*string)(unsafe.Pointer(in.Type))
+ out.Zones = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Zones))
+ return nil
+}
+
+// Convert_v1alpha1_DNSProvider_To_core_DNSProvider is an autogenerated conversion function.
+func Convert_v1alpha1_DNSProvider_To_core_DNSProvider(in *DNSProvider, out *core.DNSProvider, s conversion.Scope) error {
+ return autoConvert_v1alpha1_DNSProvider_To_core_DNSProvider(in, out, s)
+}
+
+func autoConvert_core_DNSProvider_To_v1alpha1_DNSProvider(in *core.DNSProvider, out *DNSProvider, s conversion.Scope) error {
+ out.Domains = (*DNSIncludeExclude)(unsafe.Pointer(in.Domains))
+ out.Primary = (*bool)(unsafe.Pointer(in.Primary))
+ out.SecretName = (*string)(unsafe.Pointer(in.SecretName))
+ out.Type = (*string)(unsafe.Pointer(in.Type))
+ out.Zones = (*DNSIncludeExclude)(unsafe.Pointer(in.Zones))
+ return nil
+}
+
+// Convert_core_DNSProvider_To_v1alpha1_DNSProvider is an autogenerated conversion function.
+func Convert_core_DNSProvider_To_v1alpha1_DNSProvider(in *core.DNSProvider, out *DNSProvider, s conversion.Scope) error {
+ return autoConvert_core_DNSProvider_To_v1alpha1_DNSProvider(in, out, s)
+}
+
+func autoConvert_v1alpha1_DataVolume_To_core_DataVolume(in *DataVolume, out *core.DataVolume, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Type = (*string)(unsafe.Pointer(in.Type))
+ out.VolumeSize = in.VolumeSize
+ out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted))
+ return nil
+}
+
+// Convert_v1alpha1_DataVolume_To_core_DataVolume is an autogenerated conversion function.
+func Convert_v1alpha1_DataVolume_To_core_DataVolume(in *DataVolume, out *core.DataVolume, s conversion.Scope) error {
+ return autoConvert_v1alpha1_DataVolume_To_core_DataVolume(in, out, s)
+}
+
+func autoConvert_core_DataVolume_To_v1alpha1_DataVolume(in *core.DataVolume, out *DataVolume, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Type = (*string)(unsafe.Pointer(in.Type))
+ out.VolumeSize = in.VolumeSize
+ out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted))
+ return nil
+}
+
+// Convert_core_DataVolume_To_v1alpha1_DataVolume is an autogenerated conversion function.
+func Convert_core_DataVolume_To_v1alpha1_DataVolume(in *core.DataVolume, out *DataVolume, s conversion.Scope) error {
+ return autoConvert_core_DataVolume_To_v1alpha1_DataVolume(in, out, s)
+}
+
+func autoConvert_v1alpha1_Endpoint_To_core_Endpoint(in *Endpoint, out *core.Endpoint, s conversion.Scope) error {
+ out.Name = in.Name
+ out.URL = in.URL
+ out.Purpose = in.Purpose
+ return nil
+}
+
+// Convert_v1alpha1_Endpoint_To_core_Endpoint is an autogenerated conversion function.
+func Convert_v1alpha1_Endpoint_To_core_Endpoint(in *Endpoint, out *core.Endpoint, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Endpoint_To_core_Endpoint(in, out, s)
+}
+
+func autoConvert_core_Endpoint_To_v1alpha1_Endpoint(in *core.Endpoint, out *Endpoint, s conversion.Scope) error {
+ out.Name = in.Name
+ out.URL = in.URL
+ out.Purpose = in.Purpose
+ return nil
+}
+
+// Convert_core_Endpoint_To_v1alpha1_Endpoint is an autogenerated conversion function.
+func Convert_core_Endpoint_To_v1alpha1_Endpoint(in *core.Endpoint, out *Endpoint, s conversion.Scope) error {
+ return autoConvert_core_Endpoint_To_v1alpha1_Endpoint(in, out, s)
+}
+
+func autoConvert_v1alpha1_ExpirableVersion_To_core_ExpirableVersion(in *ExpirableVersion, out *core.ExpirableVersion, s conversion.Scope) error {
+ out.Version = in.Version
+ out.ExpirationDate = (*metav1.Time)(unsafe.Pointer(in.ExpirationDate))
+ out.Classification = (*core.VersionClassification)(unsafe.Pointer(in.Classification))
+ return nil
+}
+
+// Convert_v1alpha1_ExpirableVersion_To_core_ExpirableVersion is an autogenerated conversion function.
+func Convert_v1alpha1_ExpirableVersion_To_core_ExpirableVersion(in *ExpirableVersion, out *core.ExpirableVersion, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ExpirableVersion_To_core_ExpirableVersion(in, out, s)
+}
+
+func autoConvert_core_ExpirableVersion_To_v1alpha1_ExpirableVersion(in *core.ExpirableVersion, out *ExpirableVersion, s conversion.Scope) error {
+ out.Version = in.Version
+ out.ExpirationDate = (*metav1.Time)(unsafe.Pointer(in.ExpirationDate))
+ out.Classification = (*VersionClassification)(unsafe.Pointer(in.Classification))
+ return nil
+}
+
+// Convert_core_ExpirableVersion_To_v1alpha1_ExpirableVersion is an autogenerated conversion function.
+func Convert_core_ExpirableVersion_To_v1alpha1_ExpirableVersion(in *core.ExpirableVersion, out *ExpirableVersion, s conversion.Scope) error {
+ return autoConvert_core_ExpirableVersion_To_v1alpha1_ExpirableVersion(in, out, s)
+}
+
+func autoConvert_v1alpha1_Extension_To_core_Extension(in *Extension, out *core.Extension, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Disabled = (*bool)(unsafe.Pointer(in.Disabled))
+ return nil
+}
+
+// Convert_v1alpha1_Extension_To_core_Extension is an autogenerated conversion function.
+func Convert_v1alpha1_Extension_To_core_Extension(in *Extension, out *core.Extension, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Extension_To_core_Extension(in, out, s)
+}
+
+func autoConvert_core_Extension_To_v1alpha1_Extension(in *core.Extension, out *Extension, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Disabled = (*bool)(unsafe.Pointer(in.Disabled))
+ return nil
+}
+
+// Convert_core_Extension_To_v1alpha1_Extension is an autogenerated conversion function.
+func Convert_core_Extension_To_v1alpha1_Extension(in *core.Extension, out *Extension, s conversion.Scope) error {
+ return autoConvert_core_Extension_To_v1alpha1_Extension(in, out, s)
+}
+
+func autoConvert_v1alpha1_ExtensionResourceState_To_core_ExtensionResourceState(in *ExtensionResourceState, out *core.ExtensionResourceState, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.Name = (*string)(unsafe.Pointer(in.Name))
+ out.Purpose = (*string)(unsafe.Pointer(in.Purpose))
+ out.State = (*runtime.RawExtension)(unsafe.Pointer(in.State))
+ out.Resources = *(*[]core.NamedResourceReference)(unsafe.Pointer(&in.Resources))
+ return nil
+}
+
+// Convert_v1alpha1_ExtensionResourceState_To_core_ExtensionResourceState is an autogenerated conversion function.
+func Convert_v1alpha1_ExtensionResourceState_To_core_ExtensionResourceState(in *ExtensionResourceState, out *core.ExtensionResourceState, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ExtensionResourceState_To_core_ExtensionResourceState(in, out, s)
+}
+
+func autoConvert_core_ExtensionResourceState_To_v1alpha1_ExtensionResourceState(in *core.ExtensionResourceState, out *ExtensionResourceState, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.Name = (*string)(unsafe.Pointer(in.Name))
+ out.Purpose = (*string)(unsafe.Pointer(in.Purpose))
+ out.State = (*runtime.RawExtension)(unsafe.Pointer(in.State))
+ out.Resources = *(*[]v1beta1.NamedResourceReference)(unsafe.Pointer(&in.Resources))
+ return nil
+}
+
+// Convert_core_ExtensionResourceState_To_v1alpha1_ExtensionResourceState is an autogenerated conversion function.
+func Convert_core_ExtensionResourceState_To_v1alpha1_ExtensionResourceState(in *core.ExtensionResourceState, out *ExtensionResourceState, s conversion.Scope) error {
+ return autoConvert_core_ExtensionResourceState_To_v1alpha1_ExtensionResourceState(in, out, s)
+}
+
+func autoConvert_v1alpha1_Gardener_To_core_Gardener(in *Gardener, out *core.Gardener, s conversion.Scope) error {
+ out.ID = in.ID
+ out.Name = in.Name
+ out.Version = in.Version
+ return nil
+}
+
+// Convert_v1alpha1_Gardener_To_core_Gardener is an autogenerated conversion function.
+func Convert_v1alpha1_Gardener_To_core_Gardener(in *Gardener, out *core.Gardener, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Gardener_To_core_Gardener(in, out, s)
+}
+
+func autoConvert_core_Gardener_To_v1alpha1_Gardener(in *core.Gardener, out *Gardener, s conversion.Scope) error {
+ out.ID = in.ID
+ out.Name = in.Name
+ out.Version = in.Version
+ return nil
+}
+
+// Convert_core_Gardener_To_v1alpha1_Gardener is an autogenerated conversion function.
+func Convert_core_Gardener_To_v1alpha1_Gardener(in *core.Gardener, out *Gardener, s conversion.Scope) error {
+ return autoConvert_core_Gardener_To_v1alpha1_Gardener(in, out, s)
+}
+
+func autoConvert_v1alpha1_GardenerResourceData_To_core_GardenerResourceData(in *GardenerResourceData, out *core.GardenerResourceData, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Type = in.Type
+ out.Data = in.Data
+ return nil
+}
+
+// Convert_v1alpha1_GardenerResourceData_To_core_GardenerResourceData is an autogenerated conversion function.
+func Convert_v1alpha1_GardenerResourceData_To_core_GardenerResourceData(in *GardenerResourceData, out *core.GardenerResourceData, s conversion.Scope) error {
+ return autoConvert_v1alpha1_GardenerResourceData_To_core_GardenerResourceData(in, out, s)
+}
+
+func autoConvert_core_GardenerResourceData_To_v1alpha1_GardenerResourceData(in *core.GardenerResourceData, out *GardenerResourceData, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Type = in.Type
+ out.Data = in.Data
+ return nil
+}
+
+// Convert_core_GardenerResourceData_To_v1alpha1_GardenerResourceData is an autogenerated conversion function.
+func Convert_core_GardenerResourceData_To_v1alpha1_GardenerResourceData(in *core.GardenerResourceData, out *GardenerResourceData, s conversion.Scope) error {
+ return autoConvert_core_GardenerResourceData_To_v1alpha1_GardenerResourceData(in, out, s)
+}
+
+func autoConvert_v1alpha1_Hibernation_To_core_Hibernation(in *Hibernation, out *core.Hibernation, s conversion.Scope) error {
+ out.Enabled = (*bool)(unsafe.Pointer(in.Enabled))
+ out.Schedules = *(*[]core.HibernationSchedule)(unsafe.Pointer(&in.Schedules))
+ return nil
+}
+
+// Convert_v1alpha1_Hibernation_To_core_Hibernation is an autogenerated conversion function.
+func Convert_v1alpha1_Hibernation_To_core_Hibernation(in *Hibernation, out *core.Hibernation, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Hibernation_To_core_Hibernation(in, out, s)
+}
+
+func autoConvert_core_Hibernation_To_v1alpha1_Hibernation(in *core.Hibernation, out *Hibernation, s conversion.Scope) error {
+ out.Enabled = (*bool)(unsafe.Pointer(in.Enabled))
+ out.Schedules = *(*[]HibernationSchedule)(unsafe.Pointer(&in.Schedules))
+ return nil
+}
+
+// Convert_core_Hibernation_To_v1alpha1_Hibernation is an autogenerated conversion function.
+func Convert_core_Hibernation_To_v1alpha1_Hibernation(in *core.Hibernation, out *Hibernation, s conversion.Scope) error {
+ return autoConvert_core_Hibernation_To_v1alpha1_Hibernation(in, out, s)
+}
+
+func autoConvert_v1alpha1_HibernationSchedule_To_core_HibernationSchedule(in *HibernationSchedule, out *core.HibernationSchedule, s conversion.Scope) error {
+ out.Start = (*string)(unsafe.Pointer(in.Start))
+ out.End = (*string)(unsafe.Pointer(in.End))
+ out.Location = (*string)(unsafe.Pointer(in.Location))
+ return nil
+}
+
+// Convert_v1alpha1_HibernationSchedule_To_core_HibernationSchedule is an autogenerated conversion function.
+func Convert_v1alpha1_HibernationSchedule_To_core_HibernationSchedule(in *HibernationSchedule, out *core.HibernationSchedule, s conversion.Scope) error {
+ return autoConvert_v1alpha1_HibernationSchedule_To_core_HibernationSchedule(in, out, s)
+}
+
+func autoConvert_core_HibernationSchedule_To_v1alpha1_HibernationSchedule(in *core.HibernationSchedule, out *HibernationSchedule, s conversion.Scope) error {
+ out.Start = (*string)(unsafe.Pointer(in.Start))
+ out.End = (*string)(unsafe.Pointer(in.End))
+ out.Location = (*string)(unsafe.Pointer(in.Location))
+ return nil
+}
+
+// Convert_core_HibernationSchedule_To_v1alpha1_HibernationSchedule is an autogenerated conversion function.
+func Convert_core_HibernationSchedule_To_v1alpha1_HibernationSchedule(in *core.HibernationSchedule, out *HibernationSchedule, s conversion.Scope) error {
+ return autoConvert_core_HibernationSchedule_To_v1alpha1_HibernationSchedule(in, out, s)
+}
+
+func autoConvert_v1alpha1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(in *HorizontalPodAutoscalerConfig, out *core.HorizontalPodAutoscalerConfig, s conversion.Scope) error {
+ out.CPUInitializationPeriod = (*metav1.Duration)(unsafe.Pointer(in.CPUInitializationPeriod))
+ out.DownscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.DownscaleDelay))
+ out.DownscaleStabilization = (*metav1.Duration)(unsafe.Pointer(in.DownscaleStabilization))
+ out.InitialReadinessDelay = (*metav1.Duration)(unsafe.Pointer(in.InitialReadinessDelay))
+ out.SyncPeriod = (*metav1.Duration)(unsafe.Pointer(in.SyncPeriod))
+ out.Tolerance = (*float64)(unsafe.Pointer(in.Tolerance))
+ out.UpscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.UpscaleDelay))
+ return nil
+}
+
+// Convert_v1alpha1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig is an autogenerated conversion function.
+func Convert_v1alpha1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(in *HorizontalPodAutoscalerConfig, out *core.HorizontalPodAutoscalerConfig, s conversion.Scope) error {
+ return autoConvert_v1alpha1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(in, out, s)
+}
+
+func autoConvert_core_HorizontalPodAutoscalerConfig_To_v1alpha1_HorizontalPodAutoscalerConfig(in *core.HorizontalPodAutoscalerConfig, out *HorizontalPodAutoscalerConfig, s conversion.Scope) error {
+ out.CPUInitializationPeriod = (*metav1.Duration)(unsafe.Pointer(in.CPUInitializationPeriod))
+ out.DownscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.DownscaleDelay))
+ out.DownscaleStabilization = (*metav1.Duration)(unsafe.Pointer(in.DownscaleStabilization))
+ out.InitialReadinessDelay = (*metav1.Duration)(unsafe.Pointer(in.InitialReadinessDelay))
+ out.SyncPeriod = (*metav1.Duration)(unsafe.Pointer(in.SyncPeriod))
+ out.Tolerance = (*float64)(unsafe.Pointer(in.Tolerance))
+ out.UpscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.UpscaleDelay))
+ return nil
+}
+
+// Convert_core_HorizontalPodAutoscalerConfig_To_v1alpha1_HorizontalPodAutoscalerConfig is an autogenerated conversion function.
+func Convert_core_HorizontalPodAutoscalerConfig_To_v1alpha1_HorizontalPodAutoscalerConfig(in *core.HorizontalPodAutoscalerConfig, out *HorizontalPodAutoscalerConfig, s conversion.Scope) error {
+ return autoConvert_core_HorizontalPodAutoscalerConfig_To_v1alpha1_HorizontalPodAutoscalerConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_Ingress_To_core_Ingress(in *Ingress, out *core.Ingress, s conversion.Scope) error {
+ out.Domain = in.Domain
+ if err := Convert_v1alpha1_IngressController_To_core_IngressController(&in.Controller, &out.Controller, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1alpha1_Ingress_To_core_Ingress is an autogenerated conversion function.
+func Convert_v1alpha1_Ingress_To_core_Ingress(in *Ingress, out *core.Ingress, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Ingress_To_core_Ingress(in, out, s)
+}
+
+func autoConvert_core_Ingress_To_v1alpha1_Ingress(in *core.Ingress, out *Ingress, s conversion.Scope) error {
+ out.Domain = in.Domain
+ if err := Convert_core_IngressController_To_v1alpha1_IngressController(&in.Controller, &out.Controller, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_Ingress_To_v1alpha1_Ingress is an autogenerated conversion function.
+func Convert_core_Ingress_To_v1alpha1_Ingress(in *core.Ingress, out *Ingress, s conversion.Scope) error {
+ return autoConvert_core_Ingress_To_v1alpha1_Ingress(in, out, s)
+}
+
+func autoConvert_v1alpha1_IngressController_To_core_IngressController(in *IngressController, out *core.IngressController, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ return nil
+}
+
+// Convert_v1alpha1_IngressController_To_core_IngressController is an autogenerated conversion function.
+func Convert_v1alpha1_IngressController_To_core_IngressController(in *IngressController, out *core.IngressController, s conversion.Scope) error {
+ return autoConvert_v1alpha1_IngressController_To_core_IngressController(in, out, s)
+}
+
+func autoConvert_core_IngressController_To_v1alpha1_IngressController(in *core.IngressController, out *IngressController, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ return nil
+}
+
+// Convert_core_IngressController_To_v1alpha1_IngressController is an autogenerated conversion function.
+func Convert_core_IngressController_To_v1alpha1_IngressController(in *core.IngressController, out *IngressController, s conversion.Scope) error {
+ return autoConvert_core_IngressController_To_v1alpha1_IngressController(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in *KubeAPIServerConfig, out *core.KubeAPIServerConfig, s conversion.Scope) error {
+ if err := Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.AdmissionPlugins = *(*[]core.AdmissionPlugin)(unsafe.Pointer(&in.AdmissionPlugins))
+ out.APIAudiences = *(*[]string)(unsafe.Pointer(&in.APIAudiences))
+ out.AuditConfig = (*core.AuditConfig)(unsafe.Pointer(in.AuditConfig))
+ out.EnableBasicAuthentication = (*bool)(unsafe.Pointer(in.EnableBasicAuthentication))
+ out.OIDCConfig = (*core.OIDCConfig)(unsafe.Pointer(in.OIDCConfig))
+ out.RuntimeConfig = *(*map[string]bool)(unsafe.Pointer(&in.RuntimeConfig))
+ out.ServiceAccountConfig = (*core.ServiceAccountConfig)(unsafe.Pointer(in.ServiceAccountConfig))
+ out.WatchCacheSizes = (*core.WatchCacheSizes)(unsafe.Pointer(in.WatchCacheSizes))
+ out.Requests = (*core.KubeAPIServerRequests)(unsafe.Pointer(in.Requests))
+ return nil
+}
+
+// Convert_v1alpha1_KubeAPIServerConfig_To_core_KubeAPIServerConfig is an autogenerated conversion function.
+func Convert_v1alpha1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in *KubeAPIServerConfig, out *core.KubeAPIServerConfig, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in, out, s)
+}
+
+func autoConvert_core_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig(in *core.KubeAPIServerConfig, out *KubeAPIServerConfig, s conversion.Scope) error {
+ if err := Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.AdmissionPlugins = *(*[]AdmissionPlugin)(unsafe.Pointer(&in.AdmissionPlugins))
+ out.APIAudiences = *(*[]string)(unsafe.Pointer(&in.APIAudiences))
+ out.AuditConfig = (*AuditConfig)(unsafe.Pointer(in.AuditConfig))
+ out.EnableBasicAuthentication = (*bool)(unsafe.Pointer(in.EnableBasicAuthentication))
+ out.OIDCConfig = (*OIDCConfig)(unsafe.Pointer(in.OIDCConfig))
+ out.RuntimeConfig = *(*map[string]bool)(unsafe.Pointer(&in.RuntimeConfig))
+ out.ServiceAccountConfig = (*ServiceAccountConfig)(unsafe.Pointer(in.ServiceAccountConfig))
+ out.WatchCacheSizes = (*WatchCacheSizes)(unsafe.Pointer(in.WatchCacheSizes))
+ out.Requests = (*KubeAPIServerRequests)(unsafe.Pointer(in.Requests))
+ return nil
+}
+
+// Convert_core_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig is an autogenerated conversion function.
+func Convert_core_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig(in *core.KubeAPIServerConfig, out *KubeAPIServerConfig, s conversion.Scope) error {
+ return autoConvert_core_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(in *KubeAPIServerRequests, out *core.KubeAPIServerRequests, s conversion.Scope) error {
+ out.MaxNonMutatingInflight = (*int32)(unsafe.Pointer(in.MaxNonMutatingInflight))
+ out.MaxMutatingInflight = (*int32)(unsafe.Pointer(in.MaxMutatingInflight))
+ return nil
+}
+
+// Convert_v1alpha1_KubeAPIServerRequests_To_core_KubeAPIServerRequests is an autogenerated conversion function.
+func Convert_v1alpha1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(in *KubeAPIServerRequests, out *core.KubeAPIServerRequests, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(in, out, s)
+}
+
+func autoConvert_core_KubeAPIServerRequests_To_v1alpha1_KubeAPIServerRequests(in *core.KubeAPIServerRequests, out *KubeAPIServerRequests, s conversion.Scope) error {
+ out.MaxNonMutatingInflight = (*int32)(unsafe.Pointer(in.MaxNonMutatingInflight))
+ out.MaxMutatingInflight = (*int32)(unsafe.Pointer(in.MaxMutatingInflight))
+ return nil
+}
+
+// Convert_core_KubeAPIServerRequests_To_v1alpha1_KubeAPIServerRequests is an autogenerated conversion function.
+func Convert_core_KubeAPIServerRequests_To_v1alpha1_KubeAPIServerRequests(in *core.KubeAPIServerRequests, out *KubeAPIServerRequests, s conversion.Scope) error {
+ return autoConvert_core_KubeAPIServerRequests_To_v1alpha1_KubeAPIServerRequests(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in *KubeControllerManagerConfig, out *core.KubeControllerManagerConfig, s conversion.Scope) error {
+ if err := Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.HorizontalPodAutoscalerConfig = (*core.HorizontalPodAutoscalerConfig)(unsafe.Pointer(in.HorizontalPodAutoscalerConfig))
+ out.NodeCIDRMaskSize = (*int32)(unsafe.Pointer(in.NodeCIDRMaskSize))
+ out.PodEvictionTimeout = (*metav1.Duration)(unsafe.Pointer(in.PodEvictionTimeout))
+ return nil
+}
+
+// Convert_v1alpha1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig is an autogenerated conversion function.
+func Convert_v1alpha1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in *KubeControllerManagerConfig, out *core.KubeControllerManagerConfig, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in, out, s)
+}
+
+func autoConvert_core_KubeControllerManagerConfig_To_v1alpha1_KubeControllerManagerConfig(in *core.KubeControllerManagerConfig, out *KubeControllerManagerConfig, s conversion.Scope) error {
+ if err := Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.HorizontalPodAutoscalerConfig = (*HorizontalPodAutoscalerConfig)(unsafe.Pointer(in.HorizontalPodAutoscalerConfig))
+ out.NodeCIDRMaskSize = (*int32)(unsafe.Pointer(in.NodeCIDRMaskSize))
+ out.PodEvictionTimeout = (*metav1.Duration)(unsafe.Pointer(in.PodEvictionTimeout))
+ return nil
+}
+
+// Convert_core_KubeControllerManagerConfig_To_v1alpha1_KubeControllerManagerConfig is an autogenerated conversion function.
+func Convert_core_KubeControllerManagerConfig_To_v1alpha1_KubeControllerManagerConfig(in *core.KubeControllerManagerConfig, out *KubeControllerManagerConfig, s conversion.Scope) error {
+ return autoConvert_core_KubeControllerManagerConfig_To_v1alpha1_KubeControllerManagerConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubeProxyConfig_To_core_KubeProxyConfig(in *KubeProxyConfig, out *core.KubeProxyConfig, s conversion.Scope) error {
+ if err := Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.Mode = (*core.ProxyMode)(unsafe.Pointer(in.Mode))
+ return nil
+}
+
+// Convert_v1alpha1_KubeProxyConfig_To_core_KubeProxyConfig is an autogenerated conversion function.
+func Convert_v1alpha1_KubeProxyConfig_To_core_KubeProxyConfig(in *KubeProxyConfig, out *core.KubeProxyConfig, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubeProxyConfig_To_core_KubeProxyConfig(in, out, s)
+}
+
+func autoConvert_core_KubeProxyConfig_To_v1alpha1_KubeProxyConfig(in *core.KubeProxyConfig, out *KubeProxyConfig, s conversion.Scope) error {
+ if err := Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.Mode = (*ProxyMode)(unsafe.Pointer(in.Mode))
+ return nil
+}
+
+// Convert_core_KubeProxyConfig_To_v1alpha1_KubeProxyConfig is an autogenerated conversion function.
+func Convert_core_KubeProxyConfig_To_v1alpha1_KubeProxyConfig(in *core.KubeProxyConfig, out *KubeProxyConfig, s conversion.Scope) error {
+ return autoConvert_core_KubeProxyConfig_To_v1alpha1_KubeProxyConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in *KubeSchedulerConfig, out *core.KubeSchedulerConfig, s conversion.Scope) error {
+ if err := Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.KubeMaxPDVols = (*string)(unsafe.Pointer(in.KubeMaxPDVols))
+ return nil
+}
+
+// Convert_v1alpha1_KubeSchedulerConfig_To_core_KubeSchedulerConfig is an autogenerated conversion function.
+func Convert_v1alpha1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in *KubeSchedulerConfig, out *core.KubeSchedulerConfig, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in, out, s)
+}
+
+func autoConvert_core_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in *core.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error {
+ if err := Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.KubeMaxPDVols = (*string)(unsafe.Pointer(in.KubeMaxPDVols))
+ return nil
+}
+
+// Convert_core_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig is an autogenerated conversion function.
+func Convert_core_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in *core.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error {
+ return autoConvert_core_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubeletConfig_To_core_KubeletConfig(in *KubeletConfig, out *core.KubeletConfig, s conversion.Scope) error {
+ if err := Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.CPUCFSQuota = (*bool)(unsafe.Pointer(in.CPUCFSQuota))
+ out.CPUManagerPolicy = (*string)(unsafe.Pointer(in.CPUManagerPolicy))
+ out.EvictionHard = (*core.KubeletConfigEviction)(unsafe.Pointer(in.EvictionHard))
+ out.EvictionMaxPodGracePeriod = (*int32)(unsafe.Pointer(in.EvictionMaxPodGracePeriod))
+ out.EvictionMinimumReclaim = (*core.KubeletConfigEvictionMinimumReclaim)(unsafe.Pointer(in.EvictionMinimumReclaim))
+ out.EvictionPressureTransitionPeriod = (*metav1.Duration)(unsafe.Pointer(in.EvictionPressureTransitionPeriod))
+ out.EvictionSoft = (*core.KubeletConfigEviction)(unsafe.Pointer(in.EvictionSoft))
+ out.EvictionSoftGracePeriod = (*core.KubeletConfigEvictionSoftGracePeriod)(unsafe.Pointer(in.EvictionSoftGracePeriod))
+ out.MaxPods = (*int32)(unsafe.Pointer(in.MaxPods))
+ out.PodPIDsLimit = (*int64)(unsafe.Pointer(in.PodPIDsLimit))
+ out.ImagePullProgressDeadline = (*metav1.Duration)(unsafe.Pointer(in.ImagePullProgressDeadline))
+ out.FailSwapOn = (*bool)(unsafe.Pointer(in.FailSwapOn))
+ out.KubeReserved = (*core.KubeletConfigReserved)(unsafe.Pointer(in.KubeReserved))
+ out.SystemReserved = (*core.KubeletConfigReserved)(unsafe.Pointer(in.SystemReserved))
+ return nil
+}
+
+// Convert_v1alpha1_KubeletConfig_To_core_KubeletConfig is an autogenerated conversion function.
+func Convert_v1alpha1_KubeletConfig_To_core_KubeletConfig(in *KubeletConfig, out *core.KubeletConfig, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubeletConfig_To_core_KubeletConfig(in, out, s)
+}
+
+func autoConvert_core_KubeletConfig_To_v1alpha1_KubeletConfig(in *core.KubeletConfig, out *KubeletConfig, s conversion.Scope) error {
+ if err := Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.CPUCFSQuota = (*bool)(unsafe.Pointer(in.CPUCFSQuota))
+ out.CPUManagerPolicy = (*string)(unsafe.Pointer(in.CPUManagerPolicy))
+ out.EvictionHard = (*KubeletConfigEviction)(unsafe.Pointer(in.EvictionHard))
+ out.EvictionMaxPodGracePeriod = (*int32)(unsafe.Pointer(in.EvictionMaxPodGracePeriod))
+ out.EvictionMinimumReclaim = (*KubeletConfigEvictionMinimumReclaim)(unsafe.Pointer(in.EvictionMinimumReclaim))
+ out.EvictionPressureTransitionPeriod = (*metav1.Duration)(unsafe.Pointer(in.EvictionPressureTransitionPeriod))
+ out.EvictionSoft = (*KubeletConfigEviction)(unsafe.Pointer(in.EvictionSoft))
+ out.EvictionSoftGracePeriod = (*KubeletConfigEvictionSoftGracePeriod)(unsafe.Pointer(in.EvictionSoftGracePeriod))
+ out.MaxPods = (*int32)(unsafe.Pointer(in.MaxPods))
+ out.PodPIDsLimit = (*int64)(unsafe.Pointer(in.PodPIDsLimit))
+ out.ImagePullProgressDeadline = (*metav1.Duration)(unsafe.Pointer(in.ImagePullProgressDeadline))
+ out.FailSwapOn = (*bool)(unsafe.Pointer(in.FailSwapOn))
+ out.KubeReserved = (*KubeletConfigReserved)(unsafe.Pointer(in.KubeReserved))
+ out.SystemReserved = (*KubeletConfigReserved)(unsafe.Pointer(in.SystemReserved))
+ return nil
+}
+
+// Convert_core_KubeletConfig_To_v1alpha1_KubeletConfig is an autogenerated conversion function.
+func Convert_core_KubeletConfig_To_v1alpha1_KubeletConfig(in *core.KubeletConfig, out *KubeletConfig, s conversion.Scope) error {
+ return autoConvert_core_KubeletConfig_To_v1alpha1_KubeletConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubeletConfigEviction_To_core_KubeletConfigEviction(in *KubeletConfigEviction, out *core.KubeletConfigEviction, s conversion.Scope) error {
+ out.MemoryAvailable = (*string)(unsafe.Pointer(in.MemoryAvailable))
+ out.ImageFSAvailable = (*string)(unsafe.Pointer(in.ImageFSAvailable))
+ out.ImageFSInodesFree = (*string)(unsafe.Pointer(in.ImageFSInodesFree))
+ out.NodeFSAvailable = (*string)(unsafe.Pointer(in.NodeFSAvailable))
+ out.NodeFSInodesFree = (*string)(unsafe.Pointer(in.NodeFSInodesFree))
+ return nil
+}
+
+// Convert_v1alpha1_KubeletConfigEviction_To_core_KubeletConfigEviction is an autogenerated conversion function.
+func Convert_v1alpha1_KubeletConfigEviction_To_core_KubeletConfigEviction(in *KubeletConfigEviction, out *core.KubeletConfigEviction, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubeletConfigEviction_To_core_KubeletConfigEviction(in, out, s)
+}
+
+func autoConvert_core_KubeletConfigEviction_To_v1alpha1_KubeletConfigEviction(in *core.KubeletConfigEviction, out *KubeletConfigEviction, s conversion.Scope) error {
+ out.MemoryAvailable = (*string)(unsafe.Pointer(in.MemoryAvailable))
+ out.ImageFSAvailable = (*string)(unsafe.Pointer(in.ImageFSAvailable))
+ out.ImageFSInodesFree = (*string)(unsafe.Pointer(in.ImageFSInodesFree))
+ out.NodeFSAvailable = (*string)(unsafe.Pointer(in.NodeFSAvailable))
+ out.NodeFSInodesFree = (*string)(unsafe.Pointer(in.NodeFSInodesFree))
+ return nil
+}
+
+// Convert_core_KubeletConfigEviction_To_v1alpha1_KubeletConfigEviction is an autogenerated conversion function.
+func Convert_core_KubeletConfigEviction_To_v1alpha1_KubeletConfigEviction(in *core.KubeletConfigEviction, out *KubeletConfigEviction, s conversion.Scope) error {
+ return autoConvert_core_KubeletConfigEviction_To_v1alpha1_KubeletConfigEviction(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(in *KubeletConfigEvictionMinimumReclaim, out *core.KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error {
+ out.MemoryAvailable = (*resource.Quantity)(unsafe.Pointer(in.MemoryAvailable))
+ out.ImageFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.ImageFSAvailable))
+ out.ImageFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.ImageFSInodesFree))
+ out.NodeFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.NodeFSAvailable))
+ out.NodeFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.NodeFSInodesFree))
+ return nil
+}
+
+// Convert_v1alpha1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim is an autogenerated conversion function.
+func Convert_v1alpha1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(in *KubeletConfigEvictionMinimumReclaim, out *core.KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(in, out, s)
+}
+
+func autoConvert_core_KubeletConfigEvictionMinimumReclaim_To_v1alpha1_KubeletConfigEvictionMinimumReclaim(in *core.KubeletConfigEvictionMinimumReclaim, out *KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error {
+ out.MemoryAvailable = (*resource.Quantity)(unsafe.Pointer(in.MemoryAvailable))
+ out.ImageFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.ImageFSAvailable))
+ out.ImageFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.ImageFSInodesFree))
+ out.NodeFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.NodeFSAvailable))
+ out.NodeFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.NodeFSInodesFree))
+ return nil
+}
+
+// Convert_core_KubeletConfigEvictionMinimumReclaim_To_v1alpha1_KubeletConfigEvictionMinimumReclaim is an autogenerated conversion function.
+func Convert_core_KubeletConfigEvictionMinimumReclaim_To_v1alpha1_KubeletConfigEvictionMinimumReclaim(in *core.KubeletConfigEvictionMinimumReclaim, out *KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error {
+ return autoConvert_core_KubeletConfigEvictionMinimumReclaim_To_v1alpha1_KubeletConfigEvictionMinimumReclaim(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(in *KubeletConfigEvictionSoftGracePeriod, out *core.KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error {
+ out.MemoryAvailable = (*metav1.Duration)(unsafe.Pointer(in.MemoryAvailable))
+ out.ImageFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.ImageFSAvailable))
+ out.ImageFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.ImageFSInodesFree))
+ out.NodeFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.NodeFSAvailable))
+ out.NodeFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.NodeFSInodesFree))
+ return nil
+}
+
+// Convert_v1alpha1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod is an autogenerated conversion function.
+func Convert_v1alpha1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(in *KubeletConfigEvictionSoftGracePeriod, out *core.KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(in, out, s)
+}
+
+func autoConvert_core_KubeletConfigEvictionSoftGracePeriod_To_v1alpha1_KubeletConfigEvictionSoftGracePeriod(in *core.KubeletConfigEvictionSoftGracePeriod, out *KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error {
+ out.MemoryAvailable = (*metav1.Duration)(unsafe.Pointer(in.MemoryAvailable))
+ out.ImageFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.ImageFSAvailable))
+ out.ImageFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.ImageFSInodesFree))
+ out.NodeFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.NodeFSAvailable))
+ out.NodeFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.NodeFSInodesFree))
+ return nil
+}
+
+// Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1alpha1_KubeletConfigEvictionSoftGracePeriod is an autogenerated conversion function.
+func Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1alpha1_KubeletConfigEvictionSoftGracePeriod(in *core.KubeletConfigEvictionSoftGracePeriod, out *KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error {
+ return autoConvert_core_KubeletConfigEvictionSoftGracePeriod_To_v1alpha1_KubeletConfigEvictionSoftGracePeriod(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubeletConfigReserved_To_core_KubeletConfigReserved(in *KubeletConfigReserved, out *core.KubeletConfigReserved, s conversion.Scope) error {
+ out.CPU = (*resource.Quantity)(unsafe.Pointer(in.CPU))
+ out.Memory = (*resource.Quantity)(unsafe.Pointer(in.Memory))
+ out.EphemeralStorage = (*resource.Quantity)(unsafe.Pointer(in.EphemeralStorage))
+ out.PID = (*resource.Quantity)(unsafe.Pointer(in.PID))
+ return nil
+}
+
+// Convert_v1alpha1_KubeletConfigReserved_To_core_KubeletConfigReserved is an autogenerated conversion function.
+func Convert_v1alpha1_KubeletConfigReserved_To_core_KubeletConfigReserved(in *KubeletConfigReserved, out *core.KubeletConfigReserved, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubeletConfigReserved_To_core_KubeletConfigReserved(in, out, s)
+}
+
+func autoConvert_core_KubeletConfigReserved_To_v1alpha1_KubeletConfigReserved(in *core.KubeletConfigReserved, out *KubeletConfigReserved, s conversion.Scope) error {
+ out.CPU = (*resource.Quantity)(unsafe.Pointer(in.CPU))
+ out.Memory = (*resource.Quantity)(unsafe.Pointer(in.Memory))
+ out.EphemeralStorage = (*resource.Quantity)(unsafe.Pointer(in.EphemeralStorage))
+ out.PID = (*resource.Quantity)(unsafe.Pointer(in.PID))
+ return nil
+}
+
+// Convert_core_KubeletConfigReserved_To_v1alpha1_KubeletConfigReserved is an autogenerated conversion function.
+func Convert_core_KubeletConfigReserved_To_v1alpha1_KubeletConfigReserved(in *core.KubeletConfigReserved, out *KubeletConfigReserved, s conversion.Scope) error {
+ return autoConvert_core_KubeletConfigReserved_To_v1alpha1_KubeletConfigReserved(in, out, s)
+}
+
+func autoConvert_v1alpha1_Kubernetes_To_core_Kubernetes(in *Kubernetes, out *core.Kubernetes, s conversion.Scope) error {
+ out.AllowPrivilegedContainers = (*bool)(unsafe.Pointer(in.AllowPrivilegedContainers))
+ out.ClusterAutoscaler = (*core.ClusterAutoscaler)(unsafe.Pointer(in.ClusterAutoscaler))
+ out.KubeAPIServer = (*core.KubeAPIServerConfig)(unsafe.Pointer(in.KubeAPIServer))
+ out.KubeControllerManager = (*core.KubeControllerManagerConfig)(unsafe.Pointer(in.KubeControllerManager))
+ out.KubeScheduler = (*core.KubeSchedulerConfig)(unsafe.Pointer(in.KubeScheduler))
+ out.KubeProxy = (*core.KubeProxyConfig)(unsafe.Pointer(in.KubeProxy))
+ out.Kubelet = (*core.KubeletConfig)(unsafe.Pointer(in.Kubelet))
+ out.Version = in.Version
+ out.VerticalPodAutoscaler = (*core.VerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler))
+ return nil
+}
+
+// Convert_v1alpha1_Kubernetes_To_core_Kubernetes is an autogenerated conversion function.
+func Convert_v1alpha1_Kubernetes_To_core_Kubernetes(in *Kubernetes, out *core.Kubernetes, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Kubernetes_To_core_Kubernetes(in, out, s)
+}
+
+func autoConvert_core_Kubernetes_To_v1alpha1_Kubernetes(in *core.Kubernetes, out *Kubernetes, s conversion.Scope) error {
+ out.AllowPrivilegedContainers = (*bool)(unsafe.Pointer(in.AllowPrivilegedContainers))
+ out.ClusterAutoscaler = (*ClusterAutoscaler)(unsafe.Pointer(in.ClusterAutoscaler))
+ out.KubeAPIServer = (*KubeAPIServerConfig)(unsafe.Pointer(in.KubeAPIServer))
+ out.KubeControllerManager = (*KubeControllerManagerConfig)(unsafe.Pointer(in.KubeControllerManager))
+ out.KubeScheduler = (*KubeSchedulerConfig)(unsafe.Pointer(in.KubeScheduler))
+ out.KubeProxy = (*KubeProxyConfig)(unsafe.Pointer(in.KubeProxy))
+ out.Kubelet = (*KubeletConfig)(unsafe.Pointer(in.Kubelet))
+ out.Version = in.Version
+ out.VerticalPodAutoscaler = (*VerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler))
+ return nil
+}
+
+// Convert_core_Kubernetes_To_v1alpha1_Kubernetes is an autogenerated conversion function.
+func Convert_core_Kubernetes_To_v1alpha1_Kubernetes(in *core.Kubernetes, out *Kubernetes, s conversion.Scope) error {
+ return autoConvert_core_Kubernetes_To_v1alpha1_Kubernetes(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(in *KubernetesConfig, out *core.KubernetesConfig, s conversion.Scope) error {
+ out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
+ return nil
+}
+
+// Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig is an autogenerated conversion function.
+func Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(in *KubernetesConfig, out *core.KubernetesConfig, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(in, out, s)
+}
+
+func autoConvert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(in *core.KubernetesConfig, out *KubernetesConfig, s conversion.Scope) error {
+ out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
+ return nil
+}
+
+// Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig is an autogenerated conversion function.
+func Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(in *core.KubernetesConfig, out *KubernetesConfig, s conversion.Scope) error {
+ return autoConvert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubernetesDashboard_To_core_KubernetesDashboard(in *KubernetesDashboard, out *core.KubernetesDashboard, s conversion.Scope) error {
+ if err := Convert_v1alpha1_Addon_To_core_Addon(&in.Addon, &out.Addon, s); err != nil {
+ return err
+ }
+ out.AuthenticationMode = (*string)(unsafe.Pointer(in.AuthenticationMode))
+ return nil
+}
+
+// Convert_v1alpha1_KubernetesDashboard_To_core_KubernetesDashboard is an autogenerated conversion function.
+func Convert_v1alpha1_KubernetesDashboard_To_core_KubernetesDashboard(in *KubernetesDashboard, out *core.KubernetesDashboard, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubernetesDashboard_To_core_KubernetesDashboard(in, out, s)
+}
+
+func autoConvert_core_KubernetesDashboard_To_v1alpha1_KubernetesDashboard(in *core.KubernetesDashboard, out *KubernetesDashboard, s conversion.Scope) error {
+ if err := Convert_core_Addon_To_v1alpha1_Addon(&in.Addon, &out.Addon, s); err != nil {
+ return err
+ }
+ out.AuthenticationMode = (*string)(unsafe.Pointer(in.AuthenticationMode))
+ return nil
+}
+
+// Convert_core_KubernetesDashboard_To_v1alpha1_KubernetesDashboard is an autogenerated conversion function.
+func Convert_core_KubernetesDashboard_To_v1alpha1_KubernetesDashboard(in *core.KubernetesDashboard, out *KubernetesDashboard, s conversion.Scope) error {
+ return autoConvert_core_KubernetesDashboard_To_v1alpha1_KubernetesDashboard(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubernetesInfo_To_core_KubernetesInfo(in *KubernetesInfo, out *core.KubernetesInfo, s conversion.Scope) error {
+ out.Version = in.Version
+ return nil
+}
+
+// Convert_v1alpha1_KubernetesInfo_To_core_KubernetesInfo is an autogenerated conversion function.
+func Convert_v1alpha1_KubernetesInfo_To_core_KubernetesInfo(in *KubernetesInfo, out *core.KubernetesInfo, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubernetesInfo_To_core_KubernetesInfo(in, out, s)
+}
+
+func autoConvert_core_KubernetesInfo_To_v1alpha1_KubernetesInfo(in *core.KubernetesInfo, out *KubernetesInfo, s conversion.Scope) error {
+ out.Version = in.Version
+ return nil
+}
+
+// Convert_core_KubernetesInfo_To_v1alpha1_KubernetesInfo is an autogenerated conversion function.
+func Convert_core_KubernetesInfo_To_v1alpha1_KubernetesInfo(in *core.KubernetesInfo, out *KubernetesInfo, s conversion.Scope) error {
+ return autoConvert_core_KubernetesInfo_To_v1alpha1_KubernetesInfo(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubernetesSettings_To_core_KubernetesSettings(in *KubernetesSettings, out *core.KubernetesSettings, s conversion.Scope) error {
+ out.Versions = *(*[]core.ExpirableVersion)(unsafe.Pointer(&in.Versions))
+ return nil
+}
+
+// Convert_v1alpha1_KubernetesSettings_To_core_KubernetesSettings is an autogenerated conversion function.
+func Convert_v1alpha1_KubernetesSettings_To_core_KubernetesSettings(in *KubernetesSettings, out *core.KubernetesSettings, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubernetesSettings_To_core_KubernetesSettings(in, out, s)
+}
+
+func autoConvert_core_KubernetesSettings_To_v1alpha1_KubernetesSettings(in *core.KubernetesSettings, out *KubernetesSettings, s conversion.Scope) error {
+ out.Versions = *(*[]ExpirableVersion)(unsafe.Pointer(&in.Versions))
+ return nil
+}
+
+// Convert_core_KubernetesSettings_To_v1alpha1_KubernetesSettings is an autogenerated conversion function.
+func Convert_core_KubernetesSettings_To_v1alpha1_KubernetesSettings(in *core.KubernetesSettings, out *KubernetesSettings, s conversion.Scope) error {
+ return autoConvert_core_KubernetesSettings_To_v1alpha1_KubernetesSettings(in, out, s)
+}
+
+func autoConvert_v1alpha1_LastError_To_core_LastError(in *LastError, out *core.LastError, s conversion.Scope) error {
+ out.Description = in.Description
+ out.TaskID = (*string)(unsafe.Pointer(in.TaskID))
+ out.Codes = *(*[]core.ErrorCode)(unsafe.Pointer(&in.Codes))
+ out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime))
+ return nil
+}
+
+// Convert_v1alpha1_LastError_To_core_LastError is an autogenerated conversion function.
+func Convert_v1alpha1_LastError_To_core_LastError(in *LastError, out *core.LastError, s conversion.Scope) error {
+ return autoConvert_v1alpha1_LastError_To_core_LastError(in, out, s)
+}
+
+func autoConvert_core_LastError_To_v1alpha1_LastError(in *core.LastError, out *LastError, s conversion.Scope) error {
+ out.Description = in.Description
+ out.TaskID = (*string)(unsafe.Pointer(in.TaskID))
+ out.Codes = *(*[]ErrorCode)(unsafe.Pointer(&in.Codes))
+ out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime))
+ return nil
+}
+
+// Convert_core_LastError_To_v1alpha1_LastError is an autogenerated conversion function.
+func Convert_core_LastError_To_v1alpha1_LastError(in *core.LastError, out *LastError, s conversion.Scope) error {
+ return autoConvert_core_LastError_To_v1alpha1_LastError(in, out, s)
+}
+
+func autoConvert_v1alpha1_LastOperation_To_core_LastOperation(in *LastOperation, out *core.LastOperation, s conversion.Scope) error {
+ out.Description = in.Description
+ out.LastUpdateTime = in.LastUpdateTime
+ out.Progress = in.Progress
+ out.State = core.LastOperationState(in.State)
+ out.Type = core.LastOperationType(in.Type)
+ return nil
+}
+
+// Convert_v1alpha1_LastOperation_To_core_LastOperation is an autogenerated conversion function.
+func Convert_v1alpha1_LastOperation_To_core_LastOperation(in *LastOperation, out *core.LastOperation, s conversion.Scope) error {
+ return autoConvert_v1alpha1_LastOperation_To_core_LastOperation(in, out, s)
+}
+
+func autoConvert_core_LastOperation_To_v1alpha1_LastOperation(in *core.LastOperation, out *LastOperation, s conversion.Scope) error {
+ out.Description = in.Description
+ out.LastUpdateTime = in.LastUpdateTime
+ out.Progress = in.Progress
+ out.State = LastOperationState(in.State)
+ out.Type = LastOperationType(in.Type)
+ return nil
+}
+
+// Convert_core_LastOperation_To_v1alpha1_LastOperation is an autogenerated conversion function.
+func Convert_core_LastOperation_To_v1alpha1_LastOperation(in *core.LastOperation, out *LastOperation, s conversion.Scope) error {
+ return autoConvert_core_LastOperation_To_v1alpha1_LastOperation(in, out, s)
+}
+
+func autoConvert_v1alpha1_Machine_To_core_Machine(in *Machine, out *core.Machine, s conversion.Scope) error {
+ out.Type = in.Type
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(core.ShootMachineImage)
+ if err := Convert_v1alpha1_ShootMachineImage_To_core_ShootMachineImage(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Image = nil
+ }
+ return nil
+}
+
+// Convert_v1alpha1_Machine_To_core_Machine is an autogenerated conversion function.
+func Convert_v1alpha1_Machine_To_core_Machine(in *Machine, out *core.Machine, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Machine_To_core_Machine(in, out, s)
+}
+
+func autoConvert_core_Machine_To_v1alpha1_Machine(in *core.Machine, out *Machine, s conversion.Scope) error {
+ out.Type = in.Type
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(ShootMachineImage)
+ if err := Convert_core_ShootMachineImage_To_v1alpha1_ShootMachineImage(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Image = nil
+ }
+ return nil
+}
+
+// Convert_core_Machine_To_v1alpha1_Machine is an autogenerated conversion function.
+func Convert_core_Machine_To_v1alpha1_Machine(in *core.Machine, out *Machine, s conversion.Scope) error {
+ return autoConvert_core_Machine_To_v1alpha1_Machine(in, out, s)
+}
+
+func autoConvert_v1alpha1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(in *MachineControllerManagerSettings, out *core.MachineControllerManagerSettings, s conversion.Scope) error {
+ out.MachineDrainTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineDrainTimeout))
+ out.MachineHealthTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineHealthTimeout))
+ out.MachineCreationTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineCreationTimeout))
+ out.MaxEvictRetries = (*int32)(unsafe.Pointer(in.MaxEvictRetries))
+ out.NodeConditions = *(*[]string)(unsafe.Pointer(&in.NodeConditions))
+ return nil
+}
+
+// Convert_v1alpha1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings is an autogenerated conversion function.
+func Convert_v1alpha1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(in *MachineControllerManagerSettings, out *core.MachineControllerManagerSettings, s conversion.Scope) error {
+ return autoConvert_v1alpha1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(in, out, s)
+}
+
+func autoConvert_core_MachineControllerManagerSettings_To_v1alpha1_MachineControllerManagerSettings(in *core.MachineControllerManagerSettings, out *MachineControllerManagerSettings, s conversion.Scope) error {
+ out.MachineDrainTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineDrainTimeout))
+ out.MachineHealthTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineHealthTimeout))
+ out.MachineCreationTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineCreationTimeout))
+ out.MaxEvictRetries = (*int32)(unsafe.Pointer(in.MaxEvictRetries))
+ out.NodeConditions = *(*[]string)(unsafe.Pointer(&in.NodeConditions))
+ return nil
+}
+
+// Convert_core_MachineControllerManagerSettings_To_v1alpha1_MachineControllerManagerSettings is an autogenerated conversion function.
+func Convert_core_MachineControllerManagerSettings_To_v1alpha1_MachineControllerManagerSettings(in *core.MachineControllerManagerSettings, out *MachineControllerManagerSettings, s conversion.Scope) error {
+ return autoConvert_core_MachineControllerManagerSettings_To_v1alpha1_MachineControllerManagerSettings(in, out, s)
+}
+
+func autoConvert_v1alpha1_MachineImage_To_core_MachineImage(in *MachineImage, out *core.MachineImage, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Versions = *(*[]core.MachineImageVersion)(unsafe.Pointer(&in.Versions))
+ return nil
+}
+
+// Convert_v1alpha1_MachineImage_To_core_MachineImage is an autogenerated conversion function.
+func Convert_v1alpha1_MachineImage_To_core_MachineImage(in *MachineImage, out *core.MachineImage, s conversion.Scope) error {
+ return autoConvert_v1alpha1_MachineImage_To_core_MachineImage(in, out, s)
+}
+
+func autoConvert_core_MachineImage_To_v1alpha1_MachineImage(in *core.MachineImage, out *MachineImage, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Versions = *(*[]MachineImageVersion)(unsafe.Pointer(&in.Versions))
+ return nil
+}
+
+// Convert_core_MachineImage_To_v1alpha1_MachineImage is an autogenerated conversion function.
+func Convert_core_MachineImage_To_v1alpha1_MachineImage(in *core.MachineImage, out *MachineImage, s conversion.Scope) error {
+ return autoConvert_core_MachineImage_To_v1alpha1_MachineImage(in, out, s)
+}
+
+func autoConvert_v1alpha1_MachineImageVersion_To_core_MachineImageVersion(in *MachineImageVersion, out *core.MachineImageVersion, s conversion.Scope) error {
+ if err := Convert_v1alpha1_ExpirableVersion_To_core_ExpirableVersion(&in.ExpirableVersion, &out.ExpirableVersion, s); err != nil {
+ return err
+ }
+ out.CRI = *(*[]core.CRI)(unsafe.Pointer(&in.CRI))
+ return nil
+}
+
+// Convert_v1alpha1_MachineImageVersion_To_core_MachineImageVersion is an autogenerated conversion function.
+func Convert_v1alpha1_MachineImageVersion_To_core_MachineImageVersion(in *MachineImageVersion, out *core.MachineImageVersion, s conversion.Scope) error {
+ return autoConvert_v1alpha1_MachineImageVersion_To_core_MachineImageVersion(in, out, s)
+}
+
+func autoConvert_core_MachineImageVersion_To_v1alpha1_MachineImageVersion(in *core.MachineImageVersion, out *MachineImageVersion, s conversion.Scope) error {
+ if err := Convert_core_ExpirableVersion_To_v1alpha1_ExpirableVersion(&in.ExpirableVersion, &out.ExpirableVersion, s); err != nil {
+ return err
+ }
+ out.CRI = *(*[]CRI)(unsafe.Pointer(&in.CRI))
+ return nil
+}
+
+// Convert_core_MachineImageVersion_To_v1alpha1_MachineImageVersion is an autogenerated conversion function.
+func Convert_core_MachineImageVersion_To_v1alpha1_MachineImageVersion(in *core.MachineImageVersion, out *MachineImageVersion, s conversion.Scope) error {
+ return autoConvert_core_MachineImageVersion_To_v1alpha1_MachineImageVersion(in, out, s)
+}
+
+func autoConvert_v1alpha1_MachineType_To_core_MachineType(in *MachineType, out *core.MachineType, s conversion.Scope) error {
+ out.CPU = in.CPU
+ out.GPU = in.GPU
+ out.Memory = in.Memory
+ out.Name = in.Name
+ out.Storage = (*core.MachineTypeStorage)(unsafe.Pointer(in.Storage))
+ out.Usable = (*bool)(unsafe.Pointer(in.Usable))
+ return nil
+}
+
+// Convert_v1alpha1_MachineType_To_core_MachineType is an autogenerated conversion function.
+func Convert_v1alpha1_MachineType_To_core_MachineType(in *MachineType, out *core.MachineType, s conversion.Scope) error {
+ return autoConvert_v1alpha1_MachineType_To_core_MachineType(in, out, s)
+}
+
+func autoConvert_core_MachineType_To_v1alpha1_MachineType(in *core.MachineType, out *MachineType, s conversion.Scope) error {
+ out.CPU = in.CPU
+ out.GPU = in.GPU
+ out.Memory = in.Memory
+ out.Name = in.Name
+ out.Storage = (*MachineTypeStorage)(unsafe.Pointer(in.Storage))
+ out.Usable = (*bool)(unsafe.Pointer(in.Usable))
+ return nil
+}
+
+// Convert_core_MachineType_To_v1alpha1_MachineType is an autogenerated conversion function.
+func Convert_core_MachineType_To_v1alpha1_MachineType(in *core.MachineType, out *MachineType, s conversion.Scope) error {
+ return autoConvert_core_MachineType_To_v1alpha1_MachineType(in, out, s)
+}
+
+func autoConvert_v1alpha1_MachineTypeStorage_To_core_MachineTypeStorage(in *MachineTypeStorage, out *core.MachineTypeStorage, s conversion.Scope) error {
+ out.Class = in.Class
+ out.StorageSize = in.StorageSize
+ out.Type = in.Type
+ return nil
+}
+
+// Convert_v1alpha1_MachineTypeStorage_To_core_MachineTypeStorage is an autogenerated conversion function.
+func Convert_v1alpha1_MachineTypeStorage_To_core_MachineTypeStorage(in *MachineTypeStorage, out *core.MachineTypeStorage, s conversion.Scope) error {
+ return autoConvert_v1alpha1_MachineTypeStorage_To_core_MachineTypeStorage(in, out, s)
+}
+
+func autoConvert_core_MachineTypeStorage_To_v1alpha1_MachineTypeStorage(in *core.MachineTypeStorage, out *MachineTypeStorage, s conversion.Scope) error {
+ out.Class = in.Class
+ out.StorageSize = in.StorageSize
+ out.Type = in.Type
+ return nil
+}
+
+// Convert_core_MachineTypeStorage_To_v1alpha1_MachineTypeStorage is an autogenerated conversion function.
+func Convert_core_MachineTypeStorage_To_v1alpha1_MachineTypeStorage(in *core.MachineTypeStorage, out *MachineTypeStorage, s conversion.Scope) error {
+ return autoConvert_core_MachineTypeStorage_To_v1alpha1_MachineTypeStorage(in, out, s)
+}
+
+func autoConvert_v1alpha1_Maintenance_To_core_Maintenance(in *Maintenance, out *core.Maintenance, s conversion.Scope) error {
+ out.AutoUpdate = (*core.MaintenanceAutoUpdate)(unsafe.Pointer(in.AutoUpdate))
+ out.TimeWindow = (*core.MaintenanceTimeWindow)(unsafe.Pointer(in.TimeWindow))
+ out.ConfineSpecUpdateRollout = (*bool)(unsafe.Pointer(in.ConfineSpecUpdateRollout))
+ return nil
+}
+
+// Convert_v1alpha1_Maintenance_To_core_Maintenance is an autogenerated conversion function.
+func Convert_v1alpha1_Maintenance_To_core_Maintenance(in *Maintenance, out *core.Maintenance, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Maintenance_To_core_Maintenance(in, out, s)
+}
+
+func autoConvert_core_Maintenance_To_v1alpha1_Maintenance(in *core.Maintenance, out *Maintenance, s conversion.Scope) error {
+ out.AutoUpdate = (*MaintenanceAutoUpdate)(unsafe.Pointer(in.AutoUpdate))
+ out.TimeWindow = (*MaintenanceTimeWindow)(unsafe.Pointer(in.TimeWindow))
+ out.ConfineSpecUpdateRollout = (*bool)(unsafe.Pointer(in.ConfineSpecUpdateRollout))
+ return nil
+}
+
+// Convert_core_Maintenance_To_v1alpha1_Maintenance is an autogenerated conversion function.
+func Convert_core_Maintenance_To_v1alpha1_Maintenance(in *core.Maintenance, out *Maintenance, s conversion.Scope) error {
+ return autoConvert_core_Maintenance_To_v1alpha1_Maintenance(in, out, s)
+}
+
+func autoConvert_v1alpha1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(in *MaintenanceAutoUpdate, out *core.MaintenanceAutoUpdate, s conversion.Scope) error {
+ out.KubernetesVersion = in.KubernetesVersion
+ out.MachineImageVersion = in.MachineImageVersion
+ return nil
+}
+
+// Convert_v1alpha1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate is an autogenerated conversion function.
+func Convert_v1alpha1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(in *MaintenanceAutoUpdate, out *core.MaintenanceAutoUpdate, s conversion.Scope) error {
+ return autoConvert_v1alpha1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(in, out, s)
+}
+
+func autoConvert_core_MaintenanceAutoUpdate_To_v1alpha1_MaintenanceAutoUpdate(in *core.MaintenanceAutoUpdate, out *MaintenanceAutoUpdate, s conversion.Scope) error {
+ out.KubernetesVersion = in.KubernetesVersion
+ out.MachineImageVersion = in.MachineImageVersion
+ return nil
+}
+
+// Convert_core_MaintenanceAutoUpdate_To_v1alpha1_MaintenanceAutoUpdate is an autogenerated conversion function.
+func Convert_core_MaintenanceAutoUpdate_To_v1alpha1_MaintenanceAutoUpdate(in *core.MaintenanceAutoUpdate, out *MaintenanceAutoUpdate, s conversion.Scope) error {
+ return autoConvert_core_MaintenanceAutoUpdate_To_v1alpha1_MaintenanceAutoUpdate(in, out, s)
+}
+
+func autoConvert_v1alpha1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(in *MaintenanceTimeWindow, out *core.MaintenanceTimeWindow, s conversion.Scope) error {
+ out.Begin = in.Begin
+ out.End = in.End
+ return nil
+}
+
+// Convert_v1alpha1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow is an autogenerated conversion function.
+func Convert_v1alpha1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(in *MaintenanceTimeWindow, out *core.MaintenanceTimeWindow, s conversion.Scope) error {
+ return autoConvert_v1alpha1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(in, out, s)
+}
+
+func autoConvert_core_MaintenanceTimeWindow_To_v1alpha1_MaintenanceTimeWindow(in *core.MaintenanceTimeWindow, out *MaintenanceTimeWindow, s conversion.Scope) error {
+ out.Begin = in.Begin
+ out.End = in.End
+ return nil
+}
+
+// Convert_core_MaintenanceTimeWindow_To_v1alpha1_MaintenanceTimeWindow is an autogenerated conversion function.
+func Convert_core_MaintenanceTimeWindow_To_v1alpha1_MaintenanceTimeWindow(in *core.MaintenanceTimeWindow, out *MaintenanceTimeWindow, s conversion.Scope) error {
+ return autoConvert_core_MaintenanceTimeWindow_To_v1alpha1_MaintenanceTimeWindow(in, out, s)
+}
+
+func autoConvert_v1alpha1_Monitoring_To_core_Monitoring(in *Monitoring, out *core.Monitoring, s conversion.Scope) error {
+ out.Alerting = (*core.Alerting)(unsafe.Pointer(in.Alerting))
+ return nil
+}
+
+// Convert_v1alpha1_Monitoring_To_core_Monitoring is an autogenerated conversion function.
+func Convert_v1alpha1_Monitoring_To_core_Monitoring(in *Monitoring, out *core.Monitoring, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Monitoring_To_core_Monitoring(in, out, s)
+}
+
+func autoConvert_core_Monitoring_To_v1alpha1_Monitoring(in *core.Monitoring, out *Monitoring, s conversion.Scope) error {
+ out.Alerting = (*Alerting)(unsafe.Pointer(in.Alerting))
+ return nil
+}
+
+// Convert_core_Monitoring_To_v1alpha1_Monitoring is an autogenerated conversion function.
+func Convert_core_Monitoring_To_v1alpha1_Monitoring(in *core.Monitoring, out *Monitoring, s conversion.Scope) error {
+ return autoConvert_core_Monitoring_To_v1alpha1_Monitoring(in, out, s)
+}
+
+func autoConvert_v1alpha1_NamedResourceReference_To_core_NamedResourceReference(in *NamedResourceReference, out *core.NamedResourceReference, s conversion.Scope) error {
+ out.Name = in.Name
+ out.ResourceRef = in.ResourceRef
+ return nil
+}
+
+// Convert_v1alpha1_NamedResourceReference_To_core_NamedResourceReference is an autogenerated conversion function.
+func Convert_v1alpha1_NamedResourceReference_To_core_NamedResourceReference(in *NamedResourceReference, out *core.NamedResourceReference, s conversion.Scope) error {
+ return autoConvert_v1alpha1_NamedResourceReference_To_core_NamedResourceReference(in, out, s)
+}
+
+func autoConvert_core_NamedResourceReference_To_v1alpha1_NamedResourceReference(in *core.NamedResourceReference, out *NamedResourceReference, s conversion.Scope) error {
+ out.Name = in.Name
+ out.ResourceRef = in.ResourceRef
+ return nil
+}
+
+// Convert_core_NamedResourceReference_To_v1alpha1_NamedResourceReference is an autogenerated conversion function.
+func Convert_core_NamedResourceReference_To_v1alpha1_NamedResourceReference(in *core.NamedResourceReference, out *NamedResourceReference, s conversion.Scope) error {
+ return autoConvert_core_NamedResourceReference_To_v1alpha1_NamedResourceReference(in, out, s)
+}
+
+func autoConvert_v1alpha1_Networking_To_core_Networking(in *Networking, out *core.Networking, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Pods = (*string)(unsafe.Pointer(in.Pods))
+ out.Nodes = (*string)(unsafe.Pointer(in.Nodes))
+ out.Services = (*string)(unsafe.Pointer(in.Services))
+ return nil
+}
+
+// Convert_v1alpha1_Networking_To_core_Networking is an autogenerated conversion function.
+func Convert_v1alpha1_Networking_To_core_Networking(in *Networking, out *core.Networking, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Networking_To_core_Networking(in, out, s)
+}
+
+func autoConvert_core_Networking_To_v1alpha1_Networking(in *core.Networking, out *Networking, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Pods = (*string)(unsafe.Pointer(in.Pods))
+ out.Nodes = (*string)(unsafe.Pointer(in.Nodes))
+ out.Services = (*string)(unsafe.Pointer(in.Services))
+ return nil
+}
+
+// Convert_core_Networking_To_v1alpha1_Networking is an autogenerated conversion function.
+func Convert_core_Networking_To_v1alpha1_Networking(in *core.Networking, out *Networking, s conversion.Scope) error {
+ return autoConvert_core_Networking_To_v1alpha1_Networking(in, out, s)
+}
+
+func autoConvert_v1alpha1_NginxIngress_To_core_NginxIngress(in *NginxIngress, out *core.NginxIngress, s conversion.Scope) error {
+ if err := Convert_v1alpha1_Addon_To_core_Addon(&in.Addon, &out.Addon, s); err != nil {
+ return err
+ }
+ out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges))
+ out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config))
+ out.ExternalTrafficPolicy = (*v1.ServiceExternalTrafficPolicyType)(unsafe.Pointer(in.ExternalTrafficPolicy))
+ return nil
+}
+
+// Convert_v1alpha1_NginxIngress_To_core_NginxIngress is an autogenerated conversion function.
+func Convert_v1alpha1_NginxIngress_To_core_NginxIngress(in *NginxIngress, out *core.NginxIngress, s conversion.Scope) error {
+ return autoConvert_v1alpha1_NginxIngress_To_core_NginxIngress(in, out, s)
+}
+
+func autoConvert_core_NginxIngress_To_v1alpha1_NginxIngress(in *core.NginxIngress, out *NginxIngress, s conversion.Scope) error {
+ if err := Convert_core_Addon_To_v1alpha1_Addon(&in.Addon, &out.Addon, s); err != nil {
+ return err
+ }
+ out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges))
+ out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config))
+ out.ExternalTrafficPolicy = (*v1.ServiceExternalTrafficPolicyType)(unsafe.Pointer(in.ExternalTrafficPolicy))
+ return nil
+}
+
+// Convert_core_NginxIngress_To_v1alpha1_NginxIngress is an autogenerated conversion function.
+func Convert_core_NginxIngress_To_v1alpha1_NginxIngress(in *core.NginxIngress, out *NginxIngress, s conversion.Scope) error {
+ return autoConvert_core_NginxIngress_To_v1alpha1_NginxIngress(in, out, s)
+}
+
+func autoConvert_v1alpha1_OIDCConfig_To_core_OIDCConfig(in *OIDCConfig, out *core.OIDCConfig, s conversion.Scope) error {
+ out.CABundle = (*string)(unsafe.Pointer(in.CABundle))
+ out.ClientAuthentication = (*core.OpenIDConnectClientAuthentication)(unsafe.Pointer(in.ClientAuthentication))
+ out.ClientID = (*string)(unsafe.Pointer(in.ClientID))
+ out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim))
+ out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix))
+ out.IssuerURL = (*string)(unsafe.Pointer(in.IssuerURL))
+ out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims))
+ out.SigningAlgs = *(*[]string)(unsafe.Pointer(&in.SigningAlgs))
+ out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim))
+ out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix))
+ return nil
+}
+
+// Convert_v1alpha1_OIDCConfig_To_core_OIDCConfig is an autogenerated conversion function.
+func Convert_v1alpha1_OIDCConfig_To_core_OIDCConfig(in *OIDCConfig, out *core.OIDCConfig, s conversion.Scope) error {
+ return autoConvert_v1alpha1_OIDCConfig_To_core_OIDCConfig(in, out, s)
+}
+
+func autoConvert_core_OIDCConfig_To_v1alpha1_OIDCConfig(in *core.OIDCConfig, out *OIDCConfig, s conversion.Scope) error {
+ out.CABundle = (*string)(unsafe.Pointer(in.CABundle))
+ out.ClientAuthentication = (*OpenIDConnectClientAuthentication)(unsafe.Pointer(in.ClientAuthentication))
+ out.ClientID = (*string)(unsafe.Pointer(in.ClientID))
+ out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim))
+ out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix))
+ out.IssuerURL = (*string)(unsafe.Pointer(in.IssuerURL))
+ out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims))
+ out.SigningAlgs = *(*[]string)(unsafe.Pointer(&in.SigningAlgs))
+ out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim))
+ out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix))
+ return nil
+}
+
+// Convert_core_OIDCConfig_To_v1alpha1_OIDCConfig is an autogenerated conversion function.
+func Convert_core_OIDCConfig_To_v1alpha1_OIDCConfig(in *core.OIDCConfig, out *OIDCConfig, s conversion.Scope) error {
+ return autoConvert_core_OIDCConfig_To_v1alpha1_OIDCConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(in *OpenIDConnectClientAuthentication, out *core.OpenIDConnectClientAuthentication, s conversion.Scope) error {
+ out.ExtraConfig = *(*map[string]string)(unsafe.Pointer(&in.ExtraConfig))
+ out.Secret = (*string)(unsafe.Pointer(in.Secret))
+ return nil
+}
+
+// Convert_v1alpha1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication is an autogenerated conversion function.
+func Convert_v1alpha1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(in *OpenIDConnectClientAuthentication, out *core.OpenIDConnectClientAuthentication, s conversion.Scope) error {
+ return autoConvert_v1alpha1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(in, out, s)
+}
+
+func autoConvert_core_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication(in *core.OpenIDConnectClientAuthentication, out *OpenIDConnectClientAuthentication, s conversion.Scope) error {
+ out.ExtraConfig = *(*map[string]string)(unsafe.Pointer(&in.ExtraConfig))
+ out.Secret = (*string)(unsafe.Pointer(in.Secret))
+ return nil
+}
+
+// Convert_core_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication is an autogenerated conversion function.
+func Convert_core_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication(in *core.OpenIDConnectClientAuthentication, out *OpenIDConnectClientAuthentication, s conversion.Scope) error {
+ return autoConvert_core_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication(in, out, s)
+}
+
+func autoConvert_v1alpha1_Plant_To_core_Plant(in *Plant, out *core.Plant, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1alpha1_PlantSpec_To_core_PlantSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_PlantStatus_To_core_PlantStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1alpha1_Plant_To_core_Plant is an autogenerated conversion function.
+func Convert_v1alpha1_Plant_To_core_Plant(in *Plant, out *core.Plant, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Plant_To_core_Plant(in, out, s)
+}
+
+func autoConvert_core_Plant_To_v1alpha1_Plant(in *core.Plant, out *Plant, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_PlantSpec_To_v1alpha1_PlantSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_PlantStatus_To_v1alpha1_PlantStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_Plant_To_v1alpha1_Plant is an autogenerated conversion function.
+func Convert_core_Plant_To_v1alpha1_Plant(in *core.Plant, out *Plant, s conversion.Scope) error {
+ return autoConvert_core_Plant_To_v1alpha1_Plant(in, out, s)
+}
+
+func autoConvert_v1alpha1_PlantList_To_core_PlantList(in *PlantList, out *core.PlantList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.Plant)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1alpha1_PlantList_To_core_PlantList is an autogenerated conversion function.
+func Convert_v1alpha1_PlantList_To_core_PlantList(in *PlantList, out *core.PlantList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_PlantList_To_core_PlantList(in, out, s)
+}
+
+func autoConvert_core_PlantList_To_v1alpha1_PlantList(in *core.PlantList, out *PlantList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]Plant)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_PlantList_To_v1alpha1_PlantList is an autogenerated conversion function.
+func Convert_core_PlantList_To_v1alpha1_PlantList(in *core.PlantList, out *PlantList, s conversion.Scope) error {
+ return autoConvert_core_PlantList_To_v1alpha1_PlantList(in, out, s)
+}
+
+func autoConvert_v1alpha1_PlantSpec_To_core_PlantSpec(in *PlantSpec, out *core.PlantSpec, s conversion.Scope) error {
+ out.SecretRef = in.SecretRef
+ out.Endpoints = *(*[]core.Endpoint)(unsafe.Pointer(&in.Endpoints))
+ return nil
+}
+
+// Convert_v1alpha1_PlantSpec_To_core_PlantSpec is an autogenerated conversion function.
+func Convert_v1alpha1_PlantSpec_To_core_PlantSpec(in *PlantSpec, out *core.PlantSpec, s conversion.Scope) error {
+ return autoConvert_v1alpha1_PlantSpec_To_core_PlantSpec(in, out, s)
+}
+
+func autoConvert_core_PlantSpec_To_v1alpha1_PlantSpec(in *core.PlantSpec, out *PlantSpec, s conversion.Scope) error {
+ out.SecretRef = in.SecretRef
+ out.Endpoints = *(*[]Endpoint)(unsafe.Pointer(&in.Endpoints))
+ return nil
+}
+
+// Convert_core_PlantSpec_To_v1alpha1_PlantSpec is an autogenerated conversion function.
+func Convert_core_PlantSpec_To_v1alpha1_PlantSpec(in *core.PlantSpec, out *PlantSpec, s conversion.Scope) error {
+ return autoConvert_core_PlantSpec_To_v1alpha1_PlantSpec(in, out, s)
+}
+
+func autoConvert_v1alpha1_PlantStatus_To_core_PlantStatus(in *PlantStatus, out *core.PlantStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions))
+ out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
+ out.ClusterInfo = (*core.ClusterInfo)(unsafe.Pointer(in.ClusterInfo))
+ return nil
+}
+
+// Convert_v1alpha1_PlantStatus_To_core_PlantStatus is an autogenerated conversion function.
+func Convert_v1alpha1_PlantStatus_To_core_PlantStatus(in *PlantStatus, out *core.PlantStatus, s conversion.Scope) error {
+ return autoConvert_v1alpha1_PlantStatus_To_core_PlantStatus(in, out, s)
+}
+
+func autoConvert_core_PlantStatus_To_v1alpha1_PlantStatus(in *core.PlantStatus, out *PlantStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions))
+ out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
+ out.ClusterInfo = (*ClusterInfo)(unsafe.Pointer(in.ClusterInfo))
+ return nil
+}
+
+// Convert_core_PlantStatus_To_v1alpha1_PlantStatus is an autogenerated conversion function.
+func Convert_core_PlantStatus_To_v1alpha1_PlantStatus(in *core.PlantStatus, out *PlantStatus, s conversion.Scope) error {
+ return autoConvert_core_PlantStatus_To_v1alpha1_PlantStatus(in, out, s)
+}
+
+func autoConvert_v1alpha1_Project_To_core_Project(in *Project, out *core.Project, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1alpha1_ProjectSpec_To_core_ProjectSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_ProjectStatus_To_core_ProjectStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1alpha1_Project_To_core_Project is an autogenerated conversion function.
+func Convert_v1alpha1_Project_To_core_Project(in *Project, out *core.Project, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Project_To_core_Project(in, out, s)
+}
+
+func autoConvert_core_Project_To_v1alpha1_Project(in *core.Project, out *Project, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_ProjectSpec_To_v1alpha1_ProjectSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_ProjectStatus_To_v1alpha1_ProjectStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_Project_To_v1alpha1_Project is an autogenerated conversion function.
+func Convert_core_Project_To_v1alpha1_Project(in *core.Project, out *Project, s conversion.Scope) error {
+ return autoConvert_core_Project_To_v1alpha1_Project(in, out, s)
+}
+
+func autoConvert_v1alpha1_ProjectList_To_core_ProjectList(in *ProjectList, out *core.ProjectList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]core.Project, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_Project_To_core_Project(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1alpha1_ProjectList_To_core_ProjectList is an autogenerated conversion function.
+func Convert_v1alpha1_ProjectList_To_core_ProjectList(in *ProjectList, out *core.ProjectList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ProjectList_To_core_ProjectList(in, out, s)
+}
+
+func autoConvert_core_ProjectList_To_v1alpha1_ProjectList(in *core.ProjectList, out *ProjectList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Project, len(*in))
+ for i := range *in {
+ if err := Convert_core_Project_To_v1alpha1_Project(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_core_ProjectList_To_v1alpha1_ProjectList is an autogenerated conversion function.
+func Convert_core_ProjectList_To_v1alpha1_ProjectList(in *core.ProjectList, out *ProjectList, s conversion.Scope) error {
+ return autoConvert_core_ProjectList_To_v1alpha1_ProjectList(in, out, s)
+}
+
+func autoConvert_v1alpha1_ProjectMember_To_core_ProjectMember(in *ProjectMember, out *core.ProjectMember, s conversion.Scope) error {
+ out.Subject = in.Subject
+ // WARNING: in.Role requires manual conversion: does not exist in peer-type
+ out.Roles = *(*[]string)(unsafe.Pointer(&in.Roles))
+ return nil
+}
+
+func autoConvert_core_ProjectMember_To_v1alpha1_ProjectMember(in *core.ProjectMember, out *ProjectMember, s conversion.Scope) error {
+ out.Subject = in.Subject
+ out.Roles = *(*[]string)(unsafe.Pointer(&in.Roles))
+ return nil
+}
+
+func autoConvert_v1alpha1_ProjectSpec_To_core_ProjectSpec(in *ProjectSpec, out *core.ProjectSpec, s conversion.Scope) error {
+ out.CreatedBy = (*rbacv1.Subject)(unsafe.Pointer(in.CreatedBy))
+ out.Description = (*string)(unsafe.Pointer(in.Description))
+ out.Owner = (*rbacv1.Subject)(unsafe.Pointer(in.Owner))
+ out.Purpose = (*string)(unsafe.Pointer(in.Purpose))
+ if in.Members != nil {
+ in, out := &in.Members, &out.Members
+ *out = make([]core.ProjectMember, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_ProjectMember_To_core_ProjectMember(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Members = nil
+ }
+ out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
+ out.Tolerations = (*core.ProjectTolerations)(unsafe.Pointer(in.Tolerations))
+ return nil
+}
+
+func autoConvert_core_ProjectSpec_To_v1alpha1_ProjectSpec(in *core.ProjectSpec, out *ProjectSpec, s conversion.Scope) error {
+ out.CreatedBy = (*rbacv1.Subject)(unsafe.Pointer(in.CreatedBy))
+ out.Description = (*string)(unsafe.Pointer(in.Description))
+ out.Owner = (*rbacv1.Subject)(unsafe.Pointer(in.Owner))
+ out.Purpose = (*string)(unsafe.Pointer(in.Purpose))
+ if in.Members != nil {
+ in, out := &in.Members, &out.Members
+ *out = make([]ProjectMember, len(*in))
+ for i := range *in {
+ if err := Convert_core_ProjectMember_To_v1alpha1_ProjectMember(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Members = nil
+ }
+ out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
+ out.Tolerations = (*ProjectTolerations)(unsafe.Pointer(in.Tolerations))
+ return nil
+}
+
+func autoConvert_v1alpha1_ProjectStatus_To_core_ProjectStatus(in *ProjectStatus, out *core.ProjectStatus, s conversion.Scope) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.Phase = core.ProjectPhase(in.Phase)
+ out.StaleSinceTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleSinceTimestamp))
+ out.StaleAutoDeleteTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleAutoDeleteTimestamp))
+ return nil
+}
+
+// Convert_v1alpha1_ProjectStatus_To_core_ProjectStatus is an autogenerated conversion function.
+func Convert_v1alpha1_ProjectStatus_To_core_ProjectStatus(in *ProjectStatus, out *core.ProjectStatus, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ProjectStatus_To_core_ProjectStatus(in, out, s)
+}
+
+func autoConvert_core_ProjectStatus_To_v1alpha1_ProjectStatus(in *core.ProjectStatus, out *ProjectStatus, s conversion.Scope) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.Phase = ProjectPhase(in.Phase)
+ out.StaleSinceTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleSinceTimestamp))
+ out.StaleAutoDeleteTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleAutoDeleteTimestamp))
+ return nil
+}
+
+// Convert_core_ProjectStatus_To_v1alpha1_ProjectStatus is an autogenerated conversion function.
+func Convert_core_ProjectStatus_To_v1alpha1_ProjectStatus(in *core.ProjectStatus, out *ProjectStatus, s conversion.Scope) error {
+ return autoConvert_core_ProjectStatus_To_v1alpha1_ProjectStatus(in, out, s)
+}
+
+func autoConvert_v1alpha1_ProjectTolerations_To_core_ProjectTolerations(in *ProjectTolerations, out *core.ProjectTolerations, s conversion.Scope) error {
+ out.Defaults = *(*[]core.Toleration)(unsafe.Pointer(&in.Defaults))
+ out.Whitelist = *(*[]core.Toleration)(unsafe.Pointer(&in.Whitelist))
+ return nil
+}
+
+// Convert_v1alpha1_ProjectTolerations_To_core_ProjectTolerations is an autogenerated conversion function.
+func Convert_v1alpha1_ProjectTolerations_To_core_ProjectTolerations(in *ProjectTolerations, out *core.ProjectTolerations, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ProjectTolerations_To_core_ProjectTolerations(in, out, s)
+}
+
+func autoConvert_core_ProjectTolerations_To_v1alpha1_ProjectTolerations(in *core.ProjectTolerations, out *ProjectTolerations, s conversion.Scope) error {
+ out.Defaults = *(*[]Toleration)(unsafe.Pointer(&in.Defaults))
+ out.Whitelist = *(*[]Toleration)(unsafe.Pointer(&in.Whitelist))
+ return nil
+}
+
+// Convert_core_ProjectTolerations_To_v1alpha1_ProjectTolerations is an autogenerated conversion function.
+func Convert_core_ProjectTolerations_To_v1alpha1_ProjectTolerations(in *core.ProjectTolerations, out *ProjectTolerations, s conversion.Scope) error {
+ return autoConvert_core_ProjectTolerations_To_v1alpha1_ProjectTolerations(in, out, s)
+}
+
+func autoConvert_v1alpha1_Provider_To_core_Provider(in *Provider, out *core.Provider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ControlPlaneConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ControlPlaneConfig))
+ out.InfrastructureConfig = (*runtime.RawExtension)(unsafe.Pointer(in.InfrastructureConfig))
+ if in.Workers != nil {
+ in, out := &in.Workers, &out.Workers
+ *out = make([]core.Worker, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_Worker_To_core_Worker(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Workers = nil
+ }
+ return nil
+}
+
+// Convert_v1alpha1_Provider_To_core_Provider is an autogenerated conversion function.
+func Convert_v1alpha1_Provider_To_core_Provider(in *Provider, out *core.Provider, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Provider_To_core_Provider(in, out, s)
+}
+
+func autoConvert_core_Provider_To_v1alpha1_Provider(in *core.Provider, out *Provider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ControlPlaneConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ControlPlaneConfig))
+ out.InfrastructureConfig = (*runtime.RawExtension)(unsafe.Pointer(in.InfrastructureConfig))
+ if in.Workers != nil {
+ in, out := &in.Workers, &out.Workers
+ *out = make([]Worker, len(*in))
+ for i := range *in {
+ if err := Convert_core_Worker_To_v1alpha1_Worker(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Workers = nil
+ }
+ return nil
+}
+
+// Convert_core_Provider_To_v1alpha1_Provider is an autogenerated conversion function.
+func Convert_core_Provider_To_v1alpha1_Provider(in *core.Provider, out *Provider, s conversion.Scope) error {
+ return autoConvert_core_Provider_To_v1alpha1_Provider(in, out, s)
+}
+
+func autoConvert_v1alpha1_Quota_To_core_Quota(in *Quota, out *core.Quota, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1alpha1_QuotaSpec_To_core_QuotaSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1alpha1_Quota_To_core_Quota is an autogenerated conversion function.
+func Convert_v1alpha1_Quota_To_core_Quota(in *Quota, out *core.Quota, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Quota_To_core_Quota(in, out, s)
+}
+
+func autoConvert_core_Quota_To_v1alpha1_Quota(in *core.Quota, out *Quota, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_QuotaSpec_To_v1alpha1_QuotaSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_Quota_To_v1alpha1_Quota is an autogenerated conversion function.
+func Convert_core_Quota_To_v1alpha1_Quota(in *core.Quota, out *Quota, s conversion.Scope) error {
+ return autoConvert_core_Quota_To_v1alpha1_Quota(in, out, s)
+}
+
+func autoConvert_v1alpha1_QuotaList_To_core_QuotaList(in *QuotaList, out *core.QuotaList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.Quota)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1alpha1_QuotaList_To_core_QuotaList is an autogenerated conversion function.
+func Convert_v1alpha1_QuotaList_To_core_QuotaList(in *QuotaList, out *core.QuotaList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_QuotaList_To_core_QuotaList(in, out, s)
+}
+
+func autoConvert_core_QuotaList_To_v1alpha1_QuotaList(in *core.QuotaList, out *QuotaList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]Quota)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_QuotaList_To_v1alpha1_QuotaList is an autogenerated conversion function.
+func Convert_core_QuotaList_To_v1alpha1_QuotaList(in *core.QuotaList, out *QuotaList, s conversion.Scope) error {
+ return autoConvert_core_QuotaList_To_v1alpha1_QuotaList(in, out, s)
+}
+
+func autoConvert_v1alpha1_QuotaSpec_To_core_QuotaSpec(in *QuotaSpec, out *core.QuotaSpec, s conversion.Scope) error {
+ out.ClusterLifetimeDays = (*int32)(unsafe.Pointer(in.ClusterLifetimeDays))
+ out.Metrics = *(*v1.ResourceList)(unsafe.Pointer(&in.Metrics))
+ out.Scope = in.Scope
+ return nil
+}
+
+// Convert_v1alpha1_QuotaSpec_To_core_QuotaSpec is an autogenerated conversion function.
+func Convert_v1alpha1_QuotaSpec_To_core_QuotaSpec(in *QuotaSpec, out *core.QuotaSpec, s conversion.Scope) error {
+ return autoConvert_v1alpha1_QuotaSpec_To_core_QuotaSpec(in, out, s)
+}
+
+func autoConvert_core_QuotaSpec_To_v1alpha1_QuotaSpec(in *core.QuotaSpec, out *QuotaSpec, s conversion.Scope) error {
+ out.ClusterLifetimeDays = (*int32)(unsafe.Pointer(in.ClusterLifetimeDays))
+ out.Metrics = *(*v1.ResourceList)(unsafe.Pointer(&in.Metrics))
+ out.Scope = in.Scope
+ return nil
+}
+
+// Convert_core_QuotaSpec_To_v1alpha1_QuotaSpec is an autogenerated conversion function.
+func Convert_core_QuotaSpec_To_v1alpha1_QuotaSpec(in *core.QuotaSpec, out *QuotaSpec, s conversion.Scope) error {
+ return autoConvert_core_QuotaSpec_To_v1alpha1_QuotaSpec(in, out, s)
+}
+
+func autoConvert_v1alpha1_Region_To_core_Region(in *Region, out *core.Region, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Zones = *(*[]core.AvailabilityZone)(unsafe.Pointer(&in.Zones))
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ return nil
+}
+
+// Convert_v1alpha1_Region_To_core_Region is an autogenerated conversion function.
+func Convert_v1alpha1_Region_To_core_Region(in *Region, out *core.Region, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Region_To_core_Region(in, out, s)
+}
+
+func autoConvert_core_Region_To_v1alpha1_Region(in *core.Region, out *Region, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Zones = *(*[]AvailabilityZone)(unsafe.Pointer(&in.Zones))
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ return nil
+}
+
+// Convert_core_Region_To_v1alpha1_Region is an autogenerated conversion function.
+func Convert_core_Region_To_v1alpha1_Region(in *core.Region, out *Region, s conversion.Scope) error {
+ return autoConvert_core_Region_To_v1alpha1_Region(in, out, s)
+}
+
+func autoConvert_v1alpha1_ResourceData_To_core_ResourceData(in *ResourceData, out *core.ResourceData, s conversion.Scope) error {
+ out.CrossVersionObjectReference = in.CrossVersionObjectReference
+ out.Data = in.Data
+ return nil
+}
+
+// Convert_v1alpha1_ResourceData_To_core_ResourceData is an autogenerated conversion function.
+func Convert_v1alpha1_ResourceData_To_core_ResourceData(in *ResourceData, out *core.ResourceData, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ResourceData_To_core_ResourceData(in, out, s)
+}
+
+func autoConvert_core_ResourceData_To_v1alpha1_ResourceData(in *core.ResourceData, out *ResourceData, s conversion.Scope) error {
+ out.CrossVersionObjectReference = in.CrossVersionObjectReference
+ out.Data = in.Data
+ return nil
+}
+
+// Convert_core_ResourceData_To_v1alpha1_ResourceData is an autogenerated conversion function.
+func Convert_core_ResourceData_To_v1alpha1_ResourceData(in *core.ResourceData, out *ResourceData, s conversion.Scope) error {
+ return autoConvert_core_ResourceData_To_v1alpha1_ResourceData(in, out, s)
+}
+
+func autoConvert_v1alpha1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(in *ResourceWatchCacheSize, out *core.ResourceWatchCacheSize, s conversion.Scope) error {
+ out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
+ out.Resource = in.Resource
+ out.CacheSize = in.CacheSize
+ return nil
+}
+
+// Convert_v1alpha1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize is an autogenerated conversion function.
+func Convert_v1alpha1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(in *ResourceWatchCacheSize, out *core.ResourceWatchCacheSize, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(in, out, s)
+}
+
+func autoConvert_core_ResourceWatchCacheSize_To_v1alpha1_ResourceWatchCacheSize(in *core.ResourceWatchCacheSize, out *ResourceWatchCacheSize, s conversion.Scope) error {
+ out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
+ out.Resource = in.Resource
+ out.CacheSize = in.CacheSize
+ return nil
+}
+
+// Convert_core_ResourceWatchCacheSize_To_v1alpha1_ResourceWatchCacheSize is an autogenerated conversion function.
+func Convert_core_ResourceWatchCacheSize_To_v1alpha1_ResourceWatchCacheSize(in *core.ResourceWatchCacheSize, out *ResourceWatchCacheSize, s conversion.Scope) error {
+ return autoConvert_core_ResourceWatchCacheSize_To_v1alpha1_ResourceWatchCacheSize(in, out, s)
+}
+
+func autoConvert_v1alpha1_SecretBinding_To_core_SecretBinding(in *SecretBinding, out *core.SecretBinding, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ out.SecretRef = in.SecretRef
+ out.Quotas = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Quotas))
+ return nil
+}
+
+// Convert_v1alpha1_SecretBinding_To_core_SecretBinding is an autogenerated conversion function.
+func Convert_v1alpha1_SecretBinding_To_core_SecretBinding(in *SecretBinding, out *core.SecretBinding, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SecretBinding_To_core_SecretBinding(in, out, s)
+}
+
+func autoConvert_core_SecretBinding_To_v1alpha1_SecretBinding(in *core.SecretBinding, out *SecretBinding, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ out.SecretRef = in.SecretRef
+ out.Quotas = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Quotas))
+ return nil
+}
+
+// Convert_core_SecretBinding_To_v1alpha1_SecretBinding is an autogenerated conversion function.
+func Convert_core_SecretBinding_To_v1alpha1_SecretBinding(in *core.SecretBinding, out *SecretBinding, s conversion.Scope) error {
+ return autoConvert_core_SecretBinding_To_v1alpha1_SecretBinding(in, out, s)
+}
+
+func autoConvert_v1alpha1_SecretBindingList_To_core_SecretBindingList(in *SecretBindingList, out *core.SecretBindingList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.SecretBinding)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1alpha1_SecretBindingList_To_core_SecretBindingList is an autogenerated conversion function.
+func Convert_v1alpha1_SecretBindingList_To_core_SecretBindingList(in *SecretBindingList, out *core.SecretBindingList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SecretBindingList_To_core_SecretBindingList(in, out, s)
+}
+
+func autoConvert_core_SecretBindingList_To_v1alpha1_SecretBindingList(in *core.SecretBindingList, out *SecretBindingList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]SecretBinding)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_SecretBindingList_To_v1alpha1_SecretBindingList is an autogenerated conversion function.
+func Convert_core_SecretBindingList_To_v1alpha1_SecretBindingList(in *core.SecretBindingList, out *SecretBindingList, s conversion.Scope) error {
+ return autoConvert_core_SecretBindingList_To_v1alpha1_SecretBindingList(in, out, s)
+}
+
+func autoConvert_v1alpha1_Seed_To_core_Seed(in *Seed, out *core.Seed, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1alpha1_SeedSpec_To_core_SeedSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_SeedStatus_To_core_SeedStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_core_Seed_To_v1alpha1_Seed(in *core.Seed, out *Seed, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_SeedSpec_To_v1alpha1_SeedSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_SeedStatus_To_v1alpha1_SeedStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1alpha1_SeedBackup_To_core_SeedBackup(in *SeedBackup, out *core.SeedBackup, s conversion.Scope) error {
+ out.Provider = in.Provider
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Region = (*string)(unsafe.Pointer(in.Region))
+ out.SecretRef = in.SecretRef
+ return nil
+}
+
+// Convert_v1alpha1_SeedBackup_To_core_SeedBackup is an autogenerated conversion function.
+func Convert_v1alpha1_SeedBackup_To_core_SeedBackup(in *SeedBackup, out *core.SeedBackup, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedBackup_To_core_SeedBackup(in, out, s)
+}
+
+func autoConvert_core_SeedBackup_To_v1alpha1_SeedBackup(in *core.SeedBackup, out *SeedBackup, s conversion.Scope) error {
+ out.Provider = in.Provider
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Region = (*string)(unsafe.Pointer(in.Region))
+ out.SecretRef = in.SecretRef
+ return nil
+}
+
+// Convert_core_SeedBackup_To_v1alpha1_SeedBackup is an autogenerated conversion function.
+func Convert_core_SeedBackup_To_v1alpha1_SeedBackup(in *core.SeedBackup, out *SeedBackup, s conversion.Scope) error {
+ return autoConvert_core_SeedBackup_To_v1alpha1_SeedBackup(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedDNS_To_core_SeedDNS(in *SeedDNS, out *core.SeedDNS, s conversion.Scope) error {
+ out.IngressDomain = (*string)(unsafe.Pointer(in.IngressDomain))
+ out.Provider = (*core.SeedDNSProvider)(unsafe.Pointer(in.Provider))
+ return nil
+}
+
+// Convert_v1alpha1_SeedDNS_To_core_SeedDNS is an autogenerated conversion function.
+func Convert_v1alpha1_SeedDNS_To_core_SeedDNS(in *SeedDNS, out *core.SeedDNS, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedDNS_To_core_SeedDNS(in, out, s)
+}
+
+func autoConvert_core_SeedDNS_To_v1alpha1_SeedDNS(in *core.SeedDNS, out *SeedDNS, s conversion.Scope) error {
+ out.IngressDomain = (*string)(unsafe.Pointer(in.IngressDomain))
+ out.Provider = (*SeedDNSProvider)(unsafe.Pointer(in.Provider))
+ return nil
+}
+
+// Convert_core_SeedDNS_To_v1alpha1_SeedDNS is an autogenerated conversion function.
+func Convert_core_SeedDNS_To_v1alpha1_SeedDNS(in *core.SeedDNS, out *SeedDNS, s conversion.Scope) error {
+ return autoConvert_core_SeedDNS_To_v1alpha1_SeedDNS(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedDNSProvider_To_core_SeedDNSProvider(in *SeedDNSProvider, out *core.SeedDNSProvider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.SecretRef = in.SecretRef
+ out.Domains = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Domains))
+ out.Zones = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Zones))
+ return nil
+}
+
+// Convert_v1alpha1_SeedDNSProvider_To_core_SeedDNSProvider is an autogenerated conversion function.
+func Convert_v1alpha1_SeedDNSProvider_To_core_SeedDNSProvider(in *SeedDNSProvider, out *core.SeedDNSProvider, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedDNSProvider_To_core_SeedDNSProvider(in, out, s)
+}
+
+func autoConvert_core_SeedDNSProvider_To_v1alpha1_SeedDNSProvider(in *core.SeedDNSProvider, out *SeedDNSProvider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.SecretRef = in.SecretRef
+ out.Domains = (*DNSIncludeExclude)(unsafe.Pointer(in.Domains))
+ out.Zones = (*DNSIncludeExclude)(unsafe.Pointer(in.Zones))
+ return nil
+}
+
+// Convert_core_SeedDNSProvider_To_v1alpha1_SeedDNSProvider is an autogenerated conversion function.
+func Convert_core_SeedDNSProvider_To_v1alpha1_SeedDNSProvider(in *core.SeedDNSProvider, out *SeedDNSProvider, s conversion.Scope) error {
+ return autoConvert_core_SeedDNSProvider_To_v1alpha1_SeedDNSProvider(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedList_To_core_SeedList(in *SeedList, out *core.SeedList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]core.Seed, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_Seed_To_core_Seed(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1alpha1_SeedList_To_core_SeedList is an autogenerated conversion function.
+func Convert_v1alpha1_SeedList_To_core_SeedList(in *SeedList, out *core.SeedList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedList_To_core_SeedList(in, out, s)
+}
+
+func autoConvert_core_SeedList_To_v1alpha1_SeedList(in *core.SeedList, out *SeedList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Seed, len(*in))
+ for i := range *in {
+ if err := Convert_core_Seed_To_v1alpha1_Seed(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_core_SeedList_To_v1alpha1_SeedList is an autogenerated conversion function.
+func Convert_core_SeedList_To_v1alpha1_SeedList(in *core.SeedList, out *SeedList, s conversion.Scope) error {
+ return autoConvert_core_SeedList_To_v1alpha1_SeedList(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedNetworks_To_core_SeedNetworks(in *SeedNetworks, out *core.SeedNetworks, s conversion.Scope) error {
+ out.Nodes = (*string)(unsafe.Pointer(in.Nodes))
+ out.Pods = in.Pods
+ out.Services = in.Services
+ out.ShootDefaults = (*core.ShootNetworks)(unsafe.Pointer(in.ShootDefaults))
+ return nil
+}
+
+func autoConvert_core_SeedNetworks_To_v1alpha1_SeedNetworks(in *core.SeedNetworks, out *SeedNetworks, s conversion.Scope) error {
+ out.Nodes = (*string)(unsafe.Pointer(in.Nodes))
+ out.Pods = in.Pods
+ out.Services = in.Services
+ out.ShootDefaults = (*ShootNetworks)(unsafe.Pointer(in.ShootDefaults))
+ // WARNING: in.BlockCIDRs requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1alpha1_SeedProvider_To_core_SeedProvider(in *SeedProvider, out *core.SeedProvider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Region = in.Region
+ return nil
+}
+
+// Convert_v1alpha1_SeedProvider_To_core_SeedProvider is an autogenerated conversion function.
+func Convert_v1alpha1_SeedProvider_To_core_SeedProvider(in *SeedProvider, out *core.SeedProvider, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedProvider_To_core_SeedProvider(in, out, s)
+}
+
+func autoConvert_core_SeedProvider_To_v1alpha1_SeedProvider(in *core.SeedProvider, out *SeedProvider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Region = in.Region
+ return nil
+}
+
+// Convert_core_SeedProvider_To_v1alpha1_SeedProvider is an autogenerated conversion function.
+func Convert_core_SeedProvider_To_v1alpha1_SeedProvider(in *core.SeedProvider, out *SeedProvider, s conversion.Scope) error {
+ return autoConvert_core_SeedProvider_To_v1alpha1_SeedProvider(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedSelector_To_core_SeedSelector(in *SeedSelector, out *core.SeedSelector, s conversion.Scope) error {
+ out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
+ out.ProviderTypes = *(*[]string)(unsafe.Pointer(&in.ProviderTypes))
+ return nil
+}
+
+// Convert_v1alpha1_SeedSelector_To_core_SeedSelector is an autogenerated conversion function.
+func Convert_v1alpha1_SeedSelector_To_core_SeedSelector(in *SeedSelector, out *core.SeedSelector, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedSelector_To_core_SeedSelector(in, out, s)
+}
+
+func autoConvert_core_SeedSelector_To_v1alpha1_SeedSelector(in *core.SeedSelector, out *SeedSelector, s conversion.Scope) error {
+ out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
+ out.ProviderTypes = *(*[]string)(unsafe.Pointer(&in.ProviderTypes))
+ return nil
+}
+
+// Convert_core_SeedSelector_To_v1alpha1_SeedSelector is an autogenerated conversion function.
+func Convert_core_SeedSelector_To_v1alpha1_SeedSelector(in *core.SeedSelector, out *SeedSelector, s conversion.Scope) error {
+ return autoConvert_core_SeedSelector_To_v1alpha1_SeedSelector(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(in *SeedSettingExcessCapacityReservation, out *core.SeedSettingExcessCapacityReservation, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_v1alpha1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation is an autogenerated conversion function.
+func Convert_v1alpha1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(in *SeedSettingExcessCapacityReservation, out *core.SeedSettingExcessCapacityReservation, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(in, out, s)
+}
+
+func autoConvert_core_SeedSettingExcessCapacityReservation_To_v1alpha1_SeedSettingExcessCapacityReservation(in *core.SeedSettingExcessCapacityReservation, out *SeedSettingExcessCapacityReservation, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_core_SeedSettingExcessCapacityReservation_To_v1alpha1_SeedSettingExcessCapacityReservation is an autogenerated conversion function.
+func Convert_core_SeedSettingExcessCapacityReservation_To_v1alpha1_SeedSettingExcessCapacityReservation(in *core.SeedSettingExcessCapacityReservation, out *SeedSettingExcessCapacityReservation, s conversion.Scope) error {
+ return autoConvert_core_SeedSettingExcessCapacityReservation_To_v1alpha1_SeedSettingExcessCapacityReservation(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(in *SeedSettingLoadBalancerServices, out *core.SeedSettingLoadBalancerServices, s conversion.Scope) error {
+ out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
+ return nil
+}
+
+// Convert_v1alpha1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices is an autogenerated conversion function.
+func Convert_v1alpha1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(in *SeedSettingLoadBalancerServices, out *core.SeedSettingLoadBalancerServices, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(in, out, s)
+}
+
+func autoConvert_core_SeedSettingLoadBalancerServices_To_v1alpha1_SeedSettingLoadBalancerServices(in *core.SeedSettingLoadBalancerServices, out *SeedSettingLoadBalancerServices, s conversion.Scope) error {
+ out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
+ return nil
+}
+
+// Convert_core_SeedSettingLoadBalancerServices_To_v1alpha1_SeedSettingLoadBalancerServices is an autogenerated conversion function.
+func Convert_core_SeedSettingLoadBalancerServices_To_v1alpha1_SeedSettingLoadBalancerServices(in *core.SeedSettingLoadBalancerServices, out *SeedSettingLoadBalancerServices, s conversion.Scope) error {
+ return autoConvert_core_SeedSettingLoadBalancerServices_To_v1alpha1_SeedSettingLoadBalancerServices(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedSettingScheduling_To_core_SeedSettingScheduling(in *SeedSettingScheduling, out *core.SeedSettingScheduling, s conversion.Scope) error {
+ out.Visible = in.Visible
+ return nil
+}
+
+// Convert_v1alpha1_SeedSettingScheduling_To_core_SeedSettingScheduling is an autogenerated conversion function.
+func Convert_v1alpha1_SeedSettingScheduling_To_core_SeedSettingScheduling(in *SeedSettingScheduling, out *core.SeedSettingScheduling, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedSettingScheduling_To_core_SeedSettingScheduling(in, out, s)
+}
+
+func autoConvert_core_SeedSettingScheduling_To_v1alpha1_SeedSettingScheduling(in *core.SeedSettingScheduling, out *SeedSettingScheduling, s conversion.Scope) error {
+ out.Visible = in.Visible
+ return nil
+}
+
+// Convert_core_SeedSettingScheduling_To_v1alpha1_SeedSettingScheduling is an autogenerated conversion function.
+func Convert_core_SeedSettingScheduling_To_v1alpha1_SeedSettingScheduling(in *core.SeedSettingScheduling, out *SeedSettingScheduling, s conversion.Scope) error {
+ return autoConvert_core_SeedSettingScheduling_To_v1alpha1_SeedSettingScheduling(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(in *SeedSettingShootDNS, out *core.SeedSettingShootDNS, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_v1alpha1_SeedSettingShootDNS_To_core_SeedSettingShootDNS is an autogenerated conversion function.
+func Convert_v1alpha1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(in *SeedSettingShootDNS, out *core.SeedSettingShootDNS, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(in, out, s)
+}
+
+func autoConvert_core_SeedSettingShootDNS_To_v1alpha1_SeedSettingShootDNS(in *core.SeedSettingShootDNS, out *SeedSettingShootDNS, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_core_SeedSettingShootDNS_To_v1alpha1_SeedSettingShootDNS is an autogenerated conversion function.
+func Convert_core_SeedSettingShootDNS_To_v1alpha1_SeedSettingShootDNS(in *core.SeedSettingShootDNS, out *SeedSettingShootDNS, s conversion.Scope) error {
+ return autoConvert_core_SeedSettingShootDNS_To_v1alpha1_SeedSettingShootDNS(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(in *SeedSettingVerticalPodAutoscaler, out *core.SeedSettingVerticalPodAutoscaler, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_v1alpha1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler is an autogenerated conversion function.
+func Convert_v1alpha1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(in *SeedSettingVerticalPodAutoscaler, out *core.SeedSettingVerticalPodAutoscaler, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(in, out, s)
+}
+
+func autoConvert_core_SeedSettingVerticalPodAutoscaler_To_v1alpha1_SeedSettingVerticalPodAutoscaler(in *core.SeedSettingVerticalPodAutoscaler, out *SeedSettingVerticalPodAutoscaler, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_core_SeedSettingVerticalPodAutoscaler_To_v1alpha1_SeedSettingVerticalPodAutoscaler is an autogenerated conversion function.
+func Convert_core_SeedSettingVerticalPodAutoscaler_To_v1alpha1_SeedSettingVerticalPodAutoscaler(in *core.SeedSettingVerticalPodAutoscaler, out *SeedSettingVerticalPodAutoscaler, s conversion.Scope) error {
+ return autoConvert_core_SeedSettingVerticalPodAutoscaler_To_v1alpha1_SeedSettingVerticalPodAutoscaler(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedSettings_To_core_SeedSettings(in *SeedSettings, out *core.SeedSettings, s conversion.Scope) error {
+ out.ExcessCapacityReservation = (*core.SeedSettingExcessCapacityReservation)(unsafe.Pointer(in.ExcessCapacityReservation))
+ out.Scheduling = (*core.SeedSettingScheduling)(unsafe.Pointer(in.Scheduling))
+ out.ShootDNS = (*core.SeedSettingShootDNS)(unsafe.Pointer(in.ShootDNS))
+ out.LoadBalancerServices = (*core.SeedSettingLoadBalancerServices)(unsafe.Pointer(in.LoadBalancerServices))
+ out.VerticalPodAutoscaler = (*core.SeedSettingVerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler))
+ return nil
+}
+
+// Convert_v1alpha1_SeedSettings_To_core_SeedSettings is an autogenerated conversion function.
+func Convert_v1alpha1_SeedSettings_To_core_SeedSettings(in *SeedSettings, out *core.SeedSettings, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedSettings_To_core_SeedSettings(in, out, s)
+}
+
+func autoConvert_core_SeedSettings_To_v1alpha1_SeedSettings(in *core.SeedSettings, out *SeedSettings, s conversion.Scope) error {
+ out.ExcessCapacityReservation = (*SeedSettingExcessCapacityReservation)(unsafe.Pointer(in.ExcessCapacityReservation))
+ out.Scheduling = (*SeedSettingScheduling)(unsafe.Pointer(in.Scheduling))
+ out.ShootDNS = (*SeedSettingShootDNS)(unsafe.Pointer(in.ShootDNS))
+ out.LoadBalancerServices = (*SeedSettingLoadBalancerServices)(unsafe.Pointer(in.LoadBalancerServices))
+ out.VerticalPodAutoscaler = (*SeedSettingVerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler))
+ return nil
+}
+
+// Convert_core_SeedSettings_To_v1alpha1_SeedSettings is an autogenerated conversion function.
+func Convert_core_SeedSettings_To_v1alpha1_SeedSettings(in *core.SeedSettings, out *SeedSettings, s conversion.Scope) error {
+ return autoConvert_core_SeedSettings_To_v1alpha1_SeedSettings(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedSpec_To_core_SeedSpec(in *SeedSpec, out *core.SeedSpec, s conversion.Scope) error {
+ out.Backup = (*core.SeedBackup)(unsafe.Pointer(in.Backup))
+ // WARNING: in.BlockCIDRs requires manual conversion: does not exist in peer-type
+ if err := Convert_v1alpha1_SeedDNS_To_core_SeedDNS(&in.DNS, &out.DNS, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_SeedNetworks_To_core_SeedNetworks(&in.Networks, &out.Networks, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_SeedProvider_To_core_SeedProvider(&in.Provider, &out.Provider, s); err != nil {
+ return err
+ }
+ out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef))
+ out.Taints = *(*[]core.SeedTaint)(unsafe.Pointer(&in.Taints))
+ out.Volume = (*core.SeedVolume)(unsafe.Pointer(in.Volume))
+ out.Settings = (*core.SeedSettings)(unsafe.Pointer(in.Settings))
+ out.Ingress = (*core.Ingress)(unsafe.Pointer(in.Ingress))
+ return nil
+}
+
+func autoConvert_core_SeedSpec_To_v1alpha1_SeedSpec(in *core.SeedSpec, out *SeedSpec, s conversion.Scope) error {
+ out.Backup = (*SeedBackup)(unsafe.Pointer(in.Backup))
+ if err := Convert_core_SeedDNS_To_v1alpha1_SeedDNS(&in.DNS, &out.DNS, s); err != nil {
+ return err
+ }
+ if err := Convert_core_SeedNetworks_To_v1alpha1_SeedNetworks(&in.Networks, &out.Networks, s); err != nil {
+ return err
+ }
+ if err := Convert_core_SeedProvider_To_v1alpha1_SeedProvider(&in.Provider, &out.Provider, s); err != nil {
+ return err
+ }
+ out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef))
+ out.Settings = (*SeedSettings)(unsafe.Pointer(in.Settings))
+ out.Taints = *(*[]SeedTaint)(unsafe.Pointer(&in.Taints))
+ out.Volume = (*SeedVolume)(unsafe.Pointer(in.Volume))
+ out.Ingress = (*Ingress)(unsafe.Pointer(in.Ingress))
+ return nil
+}
+
+func autoConvert_v1alpha1_SeedStatus_To_core_SeedStatus(in *SeedStatus, out *core.SeedStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions))
+ out.Gardener = (*core.Gardener)(unsafe.Pointer(in.Gardener))
+ out.KubernetesVersion = (*string)(unsafe.Pointer(in.KubernetesVersion))
+ out.ObservedGeneration = in.ObservedGeneration
+ out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity))
+ return nil
+}
+
+// Convert_v1alpha1_SeedStatus_To_core_SeedStatus is an autogenerated conversion function.
+func Convert_v1alpha1_SeedStatus_To_core_SeedStatus(in *SeedStatus, out *core.SeedStatus, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedStatus_To_core_SeedStatus(in, out, s)
+}
+
+func autoConvert_core_SeedStatus_To_v1alpha1_SeedStatus(in *core.SeedStatus, out *SeedStatus, s conversion.Scope) error {
+ out.Gardener = (*Gardener)(unsafe.Pointer(in.Gardener))
+ out.KubernetesVersion = (*string)(unsafe.Pointer(in.KubernetesVersion))
+ out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions))
+ out.ObservedGeneration = in.ObservedGeneration
+ out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity))
+ // WARNING: in.Capacity requires manual conversion: does not exist in peer-type
+ // WARNING: in.Allocatable requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1alpha1_SeedTaint_To_core_SeedTaint(in *SeedTaint, out *core.SeedTaint, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Value = (*string)(unsafe.Pointer(in.Value))
+ return nil
+}
+
+// Convert_v1alpha1_SeedTaint_To_core_SeedTaint is an autogenerated conversion function.
+func Convert_v1alpha1_SeedTaint_To_core_SeedTaint(in *SeedTaint, out *core.SeedTaint, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedTaint_To_core_SeedTaint(in, out, s)
+}
+
+func autoConvert_core_SeedTaint_To_v1alpha1_SeedTaint(in *core.SeedTaint, out *SeedTaint, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Value = (*string)(unsafe.Pointer(in.Value))
+ return nil
+}
+
+// Convert_core_SeedTaint_To_v1alpha1_SeedTaint is an autogenerated conversion function.
+func Convert_core_SeedTaint_To_v1alpha1_SeedTaint(in *core.SeedTaint, out *SeedTaint, s conversion.Scope) error {
+ return autoConvert_core_SeedTaint_To_v1alpha1_SeedTaint(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedVolume_To_core_SeedVolume(in *SeedVolume, out *core.SeedVolume, s conversion.Scope) error {
+ out.MinimumSize = (*resource.Quantity)(unsafe.Pointer(in.MinimumSize))
+ out.Providers = *(*[]core.SeedVolumeProvider)(unsafe.Pointer(&in.Providers))
+ return nil
+}
+
+// Convert_v1alpha1_SeedVolume_To_core_SeedVolume is an autogenerated conversion function.
+func Convert_v1alpha1_SeedVolume_To_core_SeedVolume(in *SeedVolume, out *core.SeedVolume, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedVolume_To_core_SeedVolume(in, out, s)
+}
+
+func autoConvert_core_SeedVolume_To_v1alpha1_SeedVolume(in *core.SeedVolume, out *SeedVolume, s conversion.Scope) error {
+ out.MinimumSize = (*resource.Quantity)(unsafe.Pointer(in.MinimumSize))
+ out.Providers = *(*[]SeedVolumeProvider)(unsafe.Pointer(&in.Providers))
+ return nil
+}
+
+// Convert_core_SeedVolume_To_v1alpha1_SeedVolume is an autogenerated conversion function.
+func Convert_core_SeedVolume_To_v1alpha1_SeedVolume(in *core.SeedVolume, out *SeedVolume, s conversion.Scope) error {
+ return autoConvert_core_SeedVolume_To_v1alpha1_SeedVolume(in, out, s)
+}
+
+func autoConvert_v1alpha1_SeedVolumeProvider_To_core_SeedVolumeProvider(in *SeedVolumeProvider, out *core.SeedVolumeProvider, s conversion.Scope) error {
+ out.Purpose = in.Purpose
+ out.Name = in.Name
+ return nil
+}
+
+// Convert_v1alpha1_SeedVolumeProvider_To_core_SeedVolumeProvider is an autogenerated conversion function.
+func Convert_v1alpha1_SeedVolumeProvider_To_core_SeedVolumeProvider(in *SeedVolumeProvider, out *core.SeedVolumeProvider, s conversion.Scope) error {
+ return autoConvert_v1alpha1_SeedVolumeProvider_To_core_SeedVolumeProvider(in, out, s)
+}
+
+func autoConvert_core_SeedVolumeProvider_To_v1alpha1_SeedVolumeProvider(in *core.SeedVolumeProvider, out *SeedVolumeProvider, s conversion.Scope) error {
+ out.Purpose = in.Purpose
+ out.Name = in.Name
+ return nil
+}
+
+// Convert_core_SeedVolumeProvider_To_v1alpha1_SeedVolumeProvider is an autogenerated conversion function.
+func Convert_core_SeedVolumeProvider_To_v1alpha1_SeedVolumeProvider(in *core.SeedVolumeProvider, out *SeedVolumeProvider, s conversion.Scope) error {
+ return autoConvert_core_SeedVolumeProvider_To_v1alpha1_SeedVolumeProvider(in, out, s)
+}
+
+func autoConvert_v1alpha1_ServiceAccountConfig_To_core_ServiceAccountConfig(in *ServiceAccountConfig, out *core.ServiceAccountConfig, s conversion.Scope) error {
+ out.Issuer = (*string)(unsafe.Pointer(in.Issuer))
+ out.SigningKeySecret = (*v1.LocalObjectReference)(unsafe.Pointer(in.SigningKeySecret))
+ return nil
+}
+
+// Convert_v1alpha1_ServiceAccountConfig_To_core_ServiceAccountConfig is an autogenerated conversion function.
+func Convert_v1alpha1_ServiceAccountConfig_To_core_ServiceAccountConfig(in *ServiceAccountConfig, out *core.ServiceAccountConfig, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ServiceAccountConfig_To_core_ServiceAccountConfig(in, out, s)
+}
+
+func autoConvert_core_ServiceAccountConfig_To_v1alpha1_ServiceAccountConfig(in *core.ServiceAccountConfig, out *ServiceAccountConfig, s conversion.Scope) error {
+ out.Issuer = (*string)(unsafe.Pointer(in.Issuer))
+ out.SigningKeySecret = (*v1.LocalObjectReference)(unsafe.Pointer(in.SigningKeySecret))
+ return nil
+}
+
+// Convert_core_ServiceAccountConfig_To_v1alpha1_ServiceAccountConfig is an autogenerated conversion function.
+func Convert_core_ServiceAccountConfig_To_v1alpha1_ServiceAccountConfig(in *core.ServiceAccountConfig, out *ServiceAccountConfig, s conversion.Scope) error {
+ return autoConvert_core_ServiceAccountConfig_To_v1alpha1_ServiceAccountConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_Shoot_To_core_Shoot(in *Shoot, out *core.Shoot, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1alpha1_ShootSpec_To_core_ShootSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_ShootStatus_To_core_ShootStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1alpha1_Shoot_To_core_Shoot is an autogenerated conversion function.
+func Convert_v1alpha1_Shoot_To_core_Shoot(in *Shoot, out *core.Shoot, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Shoot_To_core_Shoot(in, out, s)
+}
+
+func autoConvert_core_Shoot_To_v1alpha1_Shoot(in *core.Shoot, out *Shoot, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_ShootSpec_To_v1alpha1_ShootSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_ShootStatus_To_v1alpha1_ShootStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_Shoot_To_v1alpha1_Shoot is an autogenerated conversion function.
+func Convert_core_Shoot_To_v1alpha1_Shoot(in *core.Shoot, out *Shoot, s conversion.Scope) error {
+ return autoConvert_core_Shoot_To_v1alpha1_Shoot(in, out, s)
+}
+
+func autoConvert_v1alpha1_ShootList_To_core_ShootList(in *ShootList, out *core.ShootList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]core.Shoot, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_Shoot_To_core_Shoot(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1alpha1_ShootList_To_core_ShootList is an autogenerated conversion function.
+func Convert_v1alpha1_ShootList_To_core_ShootList(in *ShootList, out *core.ShootList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ShootList_To_core_ShootList(in, out, s)
+}
+
+func autoConvert_core_ShootList_To_v1alpha1_ShootList(in *core.ShootList, out *ShootList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Shoot, len(*in))
+ for i := range *in {
+ if err := Convert_core_Shoot_To_v1alpha1_Shoot(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_core_ShootList_To_v1alpha1_ShootList is an autogenerated conversion function.
+func Convert_core_ShootList_To_v1alpha1_ShootList(in *core.ShootList, out *ShootList, s conversion.Scope) error {
+ return autoConvert_core_ShootList_To_v1alpha1_ShootList(in, out, s)
+}
+
+func autoConvert_v1alpha1_ShootMachineImage_To_core_ShootMachineImage(in *ShootMachineImage, out *core.ShootMachineImage, s conversion.Scope) error {
+ out.Name = in.Name
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ if err := metav1.Convert_Pointer_string_To_string(&in.Version, &out.Version, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1alpha1_ShootMachineImage_To_core_ShootMachineImage is an autogenerated conversion function.
+func Convert_v1alpha1_ShootMachineImage_To_core_ShootMachineImage(in *ShootMachineImage, out *core.ShootMachineImage, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ShootMachineImage_To_core_ShootMachineImage(in, out, s)
+}
+
+func autoConvert_core_ShootMachineImage_To_v1alpha1_ShootMachineImage(in *core.ShootMachineImage, out *ShootMachineImage, s conversion.Scope) error {
+ out.Name = in.Name
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ if err := metav1.Convert_string_To_Pointer_string(&in.Version, &out.Version, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_ShootMachineImage_To_v1alpha1_ShootMachineImage is an autogenerated conversion function.
+func Convert_core_ShootMachineImage_To_v1alpha1_ShootMachineImage(in *core.ShootMachineImage, out *ShootMachineImage, s conversion.Scope) error {
+ return autoConvert_core_ShootMachineImage_To_v1alpha1_ShootMachineImage(in, out, s)
+}
+
+func autoConvert_v1alpha1_ShootNetworks_To_core_ShootNetworks(in *ShootNetworks, out *core.ShootNetworks, s conversion.Scope) error {
+ out.Pods = (*string)(unsafe.Pointer(in.Pods))
+ out.Services = (*string)(unsafe.Pointer(in.Services))
+ return nil
+}
+
+// Convert_v1alpha1_ShootNetworks_To_core_ShootNetworks is an autogenerated conversion function.
+func Convert_v1alpha1_ShootNetworks_To_core_ShootNetworks(in *ShootNetworks, out *core.ShootNetworks, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ShootNetworks_To_core_ShootNetworks(in, out, s)
+}
+
+func autoConvert_core_ShootNetworks_To_v1alpha1_ShootNetworks(in *core.ShootNetworks, out *ShootNetworks, s conversion.Scope) error {
+ out.Pods = (*string)(unsafe.Pointer(in.Pods))
+ out.Services = (*string)(unsafe.Pointer(in.Services))
+ return nil
+}
+
+// Convert_core_ShootNetworks_To_v1alpha1_ShootNetworks is an autogenerated conversion function.
+func Convert_core_ShootNetworks_To_v1alpha1_ShootNetworks(in *core.ShootNetworks, out *ShootNetworks, s conversion.Scope) error {
+ return autoConvert_core_ShootNetworks_To_v1alpha1_ShootNetworks(in, out, s)
+}
+
+func autoConvert_v1alpha1_ShootSpec_To_core_ShootSpec(in *ShootSpec, out *core.ShootSpec, s conversion.Scope) error {
+ out.Addons = (*core.Addons)(unsafe.Pointer(in.Addons))
+ out.CloudProfileName = in.CloudProfileName
+ out.DNS = (*core.DNS)(unsafe.Pointer(in.DNS))
+ out.Extensions = *(*[]core.Extension)(unsafe.Pointer(&in.Extensions))
+ out.Hibernation = (*core.Hibernation)(unsafe.Pointer(in.Hibernation))
+ if err := Convert_v1alpha1_Kubernetes_To_core_Kubernetes(&in.Kubernetes, &out.Kubernetes, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_Networking_To_core_Networking(&in.Networking, &out.Networking, s); err != nil {
+ return err
+ }
+ out.Maintenance = (*core.Maintenance)(unsafe.Pointer(in.Maintenance))
+ out.Monitoring = (*core.Monitoring)(unsafe.Pointer(in.Monitoring))
+ if err := Convert_v1alpha1_Provider_To_core_Provider(&in.Provider, &out.Provider, s); err != nil {
+ return err
+ }
+ out.Purpose = (*core.ShootPurpose)(unsafe.Pointer(in.Purpose))
+ out.Region = in.Region
+ out.SecretBindingName = in.SecretBindingName
+ out.SeedName = (*string)(unsafe.Pointer(in.SeedName))
+ out.SeedSelector = (*core.SeedSelector)(unsafe.Pointer(in.SeedSelector))
+ out.Resources = *(*[]core.NamedResourceReference)(unsafe.Pointer(&in.Resources))
+ out.Tolerations = *(*[]core.Toleration)(unsafe.Pointer(&in.Tolerations))
+ return nil
+}
+
+// Convert_v1alpha1_ShootSpec_To_core_ShootSpec is an autogenerated conversion function.
+func Convert_v1alpha1_ShootSpec_To_core_ShootSpec(in *ShootSpec, out *core.ShootSpec, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ShootSpec_To_core_ShootSpec(in, out, s)
+}
+
+func autoConvert_core_ShootSpec_To_v1alpha1_ShootSpec(in *core.ShootSpec, out *ShootSpec, s conversion.Scope) error {
+ out.Addons = (*Addons)(unsafe.Pointer(in.Addons))
+ out.CloudProfileName = in.CloudProfileName
+ out.DNS = (*DNS)(unsafe.Pointer(in.DNS))
+ out.Extensions = *(*[]Extension)(unsafe.Pointer(&in.Extensions))
+ out.Hibernation = (*Hibernation)(unsafe.Pointer(in.Hibernation))
+ if err := Convert_core_Kubernetes_To_v1alpha1_Kubernetes(&in.Kubernetes, &out.Kubernetes, s); err != nil {
+ return err
+ }
+ if err := Convert_core_Networking_To_v1alpha1_Networking(&in.Networking, &out.Networking, s); err != nil {
+ return err
+ }
+ out.Maintenance = (*Maintenance)(unsafe.Pointer(in.Maintenance))
+ out.Monitoring = (*Monitoring)(unsafe.Pointer(in.Monitoring))
+ if err := Convert_core_Provider_To_v1alpha1_Provider(&in.Provider, &out.Provider, s); err != nil {
+ return err
+ }
+ out.Purpose = (*ShootPurpose)(unsafe.Pointer(in.Purpose))
+ out.Region = in.Region
+ out.SecretBindingName = in.SecretBindingName
+ out.SeedName = (*string)(unsafe.Pointer(in.SeedName))
+ out.SeedSelector = (*SeedSelector)(unsafe.Pointer(in.SeedSelector))
+ out.Resources = *(*[]NamedResourceReference)(unsafe.Pointer(&in.Resources))
+ out.Tolerations = *(*[]Toleration)(unsafe.Pointer(&in.Tolerations))
+ return nil
+}
+
+// Convert_core_ShootSpec_To_v1alpha1_ShootSpec is an autogenerated conversion function.
+func Convert_core_ShootSpec_To_v1alpha1_ShootSpec(in *core.ShootSpec, out *ShootSpec, s conversion.Scope) error {
+ return autoConvert_core_ShootSpec_To_v1alpha1_ShootSpec(in, out, s)
+}
+
+func autoConvert_v1alpha1_ShootState_To_core_ShootState(in *ShootState, out *core.ShootState, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1alpha1_ShootStateSpec_To_core_ShootStateSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1alpha1_ShootState_To_core_ShootState is an autogenerated conversion function.
+func Convert_v1alpha1_ShootState_To_core_ShootState(in *ShootState, out *core.ShootState, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ShootState_To_core_ShootState(in, out, s)
+}
+
+func autoConvert_core_ShootState_To_v1alpha1_ShootState(in *core.ShootState, out *ShootState, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_ShootStateSpec_To_v1alpha1_ShootStateSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_ShootState_To_v1alpha1_ShootState is an autogenerated conversion function.
+func Convert_core_ShootState_To_v1alpha1_ShootState(in *core.ShootState, out *ShootState, s conversion.Scope) error {
+ return autoConvert_core_ShootState_To_v1alpha1_ShootState(in, out, s)
+}
+
+func autoConvert_v1alpha1_ShootStateList_To_core_ShootStateList(in *ShootStateList, out *core.ShootStateList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.ShootState)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1alpha1_ShootStateList_To_core_ShootStateList is an autogenerated conversion function.
+func Convert_v1alpha1_ShootStateList_To_core_ShootStateList(in *ShootStateList, out *core.ShootStateList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ShootStateList_To_core_ShootStateList(in, out, s)
+}
+
+func autoConvert_core_ShootStateList_To_v1alpha1_ShootStateList(in *core.ShootStateList, out *ShootStateList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]ShootState)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_ShootStateList_To_v1alpha1_ShootStateList is an autogenerated conversion function.
+func Convert_core_ShootStateList_To_v1alpha1_ShootStateList(in *core.ShootStateList, out *ShootStateList, s conversion.Scope) error {
+ return autoConvert_core_ShootStateList_To_v1alpha1_ShootStateList(in, out, s)
+}
+
+func autoConvert_v1alpha1_ShootStateSpec_To_core_ShootStateSpec(in *ShootStateSpec, out *core.ShootStateSpec, s conversion.Scope) error {
+ out.Gardener = *(*[]core.GardenerResourceData)(unsafe.Pointer(&in.Gardener))
+ out.Extensions = *(*[]core.ExtensionResourceState)(unsafe.Pointer(&in.Extensions))
+ out.Resources = *(*[]core.ResourceData)(unsafe.Pointer(&in.Resources))
+ return nil
+}
+
+// Convert_v1alpha1_ShootStateSpec_To_core_ShootStateSpec is an autogenerated conversion function.
+func Convert_v1alpha1_ShootStateSpec_To_core_ShootStateSpec(in *ShootStateSpec, out *core.ShootStateSpec, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ShootStateSpec_To_core_ShootStateSpec(in, out, s)
+}
+
+func autoConvert_core_ShootStateSpec_To_v1alpha1_ShootStateSpec(in *core.ShootStateSpec, out *ShootStateSpec, s conversion.Scope) error {
+ out.Gardener = *(*[]GardenerResourceData)(unsafe.Pointer(&in.Gardener))
+ out.Extensions = *(*[]ExtensionResourceState)(unsafe.Pointer(&in.Extensions))
+ out.Resources = *(*[]ResourceData)(unsafe.Pointer(&in.Resources))
+ return nil
+}
+
+// Convert_core_ShootStateSpec_To_v1alpha1_ShootStateSpec is an autogenerated conversion function.
+func Convert_core_ShootStateSpec_To_v1alpha1_ShootStateSpec(in *core.ShootStateSpec, out *ShootStateSpec, s conversion.Scope) error {
+ return autoConvert_core_ShootStateSpec_To_v1alpha1_ShootStateSpec(in, out, s)
+}
+
+func autoConvert_v1alpha1_ShootStatus_To_core_ShootStatus(in *ShootStatus, out *core.ShootStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions))
+ out.Constraints = *(*[]core.Condition)(unsafe.Pointer(&in.Constraints))
+ if err := Convert_v1alpha1_Gardener_To_core_Gardener(&in.Gardener, &out.Gardener, s); err != nil {
+ return err
+ }
+ out.IsHibernated = in.IsHibernated
+ out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation))
+ // WARNING: in.LastError requires manual conversion: does not exist in peer-type
+ out.LastErrors = *(*[]core.LastError)(unsafe.Pointer(&in.LastErrors))
+ out.ObservedGeneration = in.ObservedGeneration
+ out.RetryCycleStartTime = (*metav1.Time)(unsafe.Pointer(in.RetryCycleStartTime))
+ // WARNING: in.Seed requires manual conversion: does not exist in peer-type
+ out.TechnicalID = in.TechnicalID
+ out.UID = types.UID(in.UID)
+ out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity))
+ return nil
+}
+
+func autoConvert_core_ShootStatus_To_v1alpha1_ShootStatus(in *core.ShootStatus, out *ShootStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions))
+ out.Constraints = *(*[]Condition)(unsafe.Pointer(&in.Constraints))
+ if err := Convert_core_Gardener_To_v1alpha1_Gardener(&in.Gardener, &out.Gardener, s); err != nil {
+ return err
+ }
+ out.IsHibernated = in.IsHibernated
+ out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation))
+ out.LastErrors = *(*[]LastError)(unsafe.Pointer(&in.LastErrors))
+ out.ObservedGeneration = in.ObservedGeneration
+ out.RetryCycleStartTime = (*metav1.Time)(unsafe.Pointer(in.RetryCycleStartTime))
+ // WARNING: in.SeedName requires manual conversion: does not exist in peer-type
+ out.TechnicalID = in.TechnicalID
+ out.UID = types.UID(in.UID)
+ out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity))
+ return nil
+}
+
+func autoConvert_v1alpha1_Toleration_To_core_Toleration(in *Toleration, out *core.Toleration, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Value = (*string)(unsafe.Pointer(in.Value))
+ return nil
+}
+
+// Convert_v1alpha1_Toleration_To_core_Toleration is an autogenerated conversion function.
+func Convert_v1alpha1_Toleration_To_core_Toleration(in *Toleration, out *core.Toleration, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Toleration_To_core_Toleration(in, out, s)
+}
+
+func autoConvert_core_Toleration_To_v1alpha1_Toleration(in *core.Toleration, out *Toleration, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Value = (*string)(unsafe.Pointer(in.Value))
+ return nil
+}
+
+// Convert_core_Toleration_To_v1alpha1_Toleration is an autogenerated conversion function.
+func Convert_core_Toleration_To_v1alpha1_Toleration(in *core.Toleration, out *Toleration, s conversion.Scope) error {
+ return autoConvert_core_Toleration_To_v1alpha1_Toleration(in, out, s)
+}
+
+func autoConvert_v1alpha1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(in *VerticalPodAutoscaler, out *core.VerticalPodAutoscaler, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ out.EvictAfterOOMThreshold = (*metav1.Duration)(unsafe.Pointer(in.EvictAfterOOMThreshold))
+ out.EvictionRateBurst = (*int32)(unsafe.Pointer(in.EvictionRateBurst))
+ out.EvictionRateLimit = (*float64)(unsafe.Pointer(in.EvictionRateLimit))
+ out.EvictionTolerance = (*float64)(unsafe.Pointer(in.EvictionTolerance))
+ out.RecommendationMarginFraction = (*float64)(unsafe.Pointer(in.RecommendationMarginFraction))
+ out.UpdaterInterval = (*metav1.Duration)(unsafe.Pointer(in.UpdaterInterval))
+ out.RecommenderInterval = (*metav1.Duration)(unsafe.Pointer(in.RecommenderInterval))
+ return nil
+}
+
+// Convert_v1alpha1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler is an autogenerated conversion function.
+func Convert_v1alpha1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(in *VerticalPodAutoscaler, out *core.VerticalPodAutoscaler, s conversion.Scope) error {
+ return autoConvert_v1alpha1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(in, out, s)
+}
+
+func autoConvert_core_VerticalPodAutoscaler_To_v1alpha1_VerticalPodAutoscaler(in *core.VerticalPodAutoscaler, out *VerticalPodAutoscaler, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ out.EvictAfterOOMThreshold = (*metav1.Duration)(unsafe.Pointer(in.EvictAfterOOMThreshold))
+ out.EvictionRateBurst = (*int32)(unsafe.Pointer(in.EvictionRateBurst))
+ out.EvictionRateLimit = (*float64)(unsafe.Pointer(in.EvictionRateLimit))
+ out.EvictionTolerance = (*float64)(unsafe.Pointer(in.EvictionTolerance))
+ out.RecommendationMarginFraction = (*float64)(unsafe.Pointer(in.RecommendationMarginFraction))
+ out.UpdaterInterval = (*metav1.Duration)(unsafe.Pointer(in.UpdaterInterval))
+ out.RecommenderInterval = (*metav1.Duration)(unsafe.Pointer(in.RecommenderInterval))
+ return nil
+}
+
+// Convert_core_VerticalPodAutoscaler_To_v1alpha1_VerticalPodAutoscaler is an autogenerated conversion function.
+func Convert_core_VerticalPodAutoscaler_To_v1alpha1_VerticalPodAutoscaler(in *core.VerticalPodAutoscaler, out *VerticalPodAutoscaler, s conversion.Scope) error {
+ return autoConvert_core_VerticalPodAutoscaler_To_v1alpha1_VerticalPodAutoscaler(in, out, s)
+}
+
+func autoConvert_v1alpha1_Volume_To_core_Volume(in *Volume, out *core.Volume, s conversion.Scope) error {
+ out.Name = (*string)(unsafe.Pointer(in.Name))
+ out.Type = (*string)(unsafe.Pointer(in.Type))
+ out.VolumeSize = in.VolumeSize
+ out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted))
+ return nil
+}
+
+// Convert_v1alpha1_Volume_To_core_Volume is an autogenerated conversion function.
+func Convert_v1alpha1_Volume_To_core_Volume(in *Volume, out *core.Volume, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Volume_To_core_Volume(in, out, s)
+}
+
+func autoConvert_core_Volume_To_v1alpha1_Volume(in *core.Volume, out *Volume, s conversion.Scope) error {
+ out.Name = (*string)(unsafe.Pointer(in.Name))
+ out.Type = (*string)(unsafe.Pointer(in.Type))
+ out.VolumeSize = in.VolumeSize
+ out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted))
+ return nil
+}
+
+// Convert_core_Volume_To_v1alpha1_Volume is an autogenerated conversion function.
+func Convert_core_Volume_To_v1alpha1_Volume(in *core.Volume, out *Volume, s conversion.Scope) error {
+ return autoConvert_core_Volume_To_v1alpha1_Volume(in, out, s)
+}
+
+func autoConvert_v1alpha1_VolumeType_To_core_VolumeType(in *VolumeType, out *core.VolumeType, s conversion.Scope) error {
+ out.Class = in.Class
+ out.Name = in.Name
+ out.Usable = (*bool)(unsafe.Pointer(in.Usable))
+ return nil
+}
+
+// Convert_v1alpha1_VolumeType_To_core_VolumeType is an autogenerated conversion function.
+func Convert_v1alpha1_VolumeType_To_core_VolumeType(in *VolumeType, out *core.VolumeType, s conversion.Scope) error {
+ return autoConvert_v1alpha1_VolumeType_To_core_VolumeType(in, out, s)
+}
+
+func autoConvert_core_VolumeType_To_v1alpha1_VolumeType(in *core.VolumeType, out *VolumeType, s conversion.Scope) error {
+ out.Class = in.Class
+ out.Name = in.Name
+ out.Usable = (*bool)(unsafe.Pointer(in.Usable))
+ return nil
+}
+
+// Convert_core_VolumeType_To_v1alpha1_VolumeType is an autogenerated conversion function.
+func Convert_core_VolumeType_To_v1alpha1_VolumeType(in *core.VolumeType, out *VolumeType, s conversion.Scope) error {
+ return autoConvert_core_VolumeType_To_v1alpha1_VolumeType(in, out, s)
+}
+
+func autoConvert_v1alpha1_WatchCacheSizes_To_core_WatchCacheSizes(in *WatchCacheSizes, out *core.WatchCacheSizes, s conversion.Scope) error {
+ out.Default = (*int32)(unsafe.Pointer(in.Default))
+ out.Resources = *(*[]core.ResourceWatchCacheSize)(unsafe.Pointer(&in.Resources))
+ return nil
+}
+
+// Convert_v1alpha1_WatchCacheSizes_To_core_WatchCacheSizes is an autogenerated conversion function.
+func Convert_v1alpha1_WatchCacheSizes_To_core_WatchCacheSizes(in *WatchCacheSizes, out *core.WatchCacheSizes, s conversion.Scope) error {
+ return autoConvert_v1alpha1_WatchCacheSizes_To_core_WatchCacheSizes(in, out, s)
+}
+
+func autoConvert_core_WatchCacheSizes_To_v1alpha1_WatchCacheSizes(in *core.WatchCacheSizes, out *WatchCacheSizes, s conversion.Scope) error {
+ out.Default = (*int32)(unsafe.Pointer(in.Default))
+ out.Resources = *(*[]ResourceWatchCacheSize)(unsafe.Pointer(&in.Resources))
+ return nil
+}
+
+// Convert_core_WatchCacheSizes_To_v1alpha1_WatchCacheSizes is an autogenerated conversion function.
+func Convert_core_WatchCacheSizes_To_v1alpha1_WatchCacheSizes(in *core.WatchCacheSizes, out *WatchCacheSizes, s conversion.Scope) error {
+ return autoConvert_core_WatchCacheSizes_To_v1alpha1_WatchCacheSizes(in, out, s)
+}
+
+func autoConvert_v1alpha1_Worker_To_core_Worker(in *Worker, out *core.Worker, s conversion.Scope) error {
+ out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
+ out.CABundle = (*string)(unsafe.Pointer(in.CABundle))
+ out.CRI = (*core.CRI)(unsafe.Pointer(in.CRI))
+ out.Kubernetes = (*core.WorkerKubernetes)(unsafe.Pointer(in.Kubernetes))
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ out.Name = in.Name
+ if err := Convert_v1alpha1_Machine_To_core_Machine(&in.Machine, &out.Machine, s); err != nil {
+ return err
+ }
+ out.Maximum = in.Maximum
+ out.Minimum = in.Minimum
+ out.MaxSurge = (*intstr.IntOrString)(unsafe.Pointer(in.MaxSurge))
+ out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints))
+ out.Volume = (*core.Volume)(unsafe.Pointer(in.Volume))
+ out.DataVolumes = *(*[]core.DataVolume)(unsafe.Pointer(&in.DataVolumes))
+ out.KubeletDataVolumeName = (*string)(unsafe.Pointer(in.KubeletDataVolumeName))
+ out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones))
+ out.SystemComponents = (*core.WorkerSystemComponents)(unsafe.Pointer(in.SystemComponents))
+ out.MachineControllerManagerSettings = (*core.MachineControllerManagerSettings)(unsafe.Pointer(in.MachineControllerManagerSettings))
+ return nil
+}
+
+// Convert_v1alpha1_Worker_To_core_Worker is an autogenerated conversion function.
+func Convert_v1alpha1_Worker_To_core_Worker(in *Worker, out *core.Worker, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Worker_To_core_Worker(in, out, s)
+}
+
+func autoConvert_core_Worker_To_v1alpha1_Worker(in *core.Worker, out *Worker, s conversion.Scope) error {
+ out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
+ out.CABundle = (*string)(unsafe.Pointer(in.CABundle))
+ out.CRI = (*CRI)(unsafe.Pointer(in.CRI))
+ out.Kubernetes = (*WorkerKubernetes)(unsafe.Pointer(in.Kubernetes))
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ out.Name = in.Name
+ if err := Convert_core_Machine_To_v1alpha1_Machine(&in.Machine, &out.Machine, s); err != nil {
+ return err
+ }
+ out.Maximum = in.Maximum
+ out.Minimum = in.Minimum
+ out.MaxSurge = (*intstr.IntOrString)(unsafe.Pointer(in.MaxSurge))
+ out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.SystemComponents = (*WorkerSystemComponents)(unsafe.Pointer(in.SystemComponents))
+ out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints))
+ out.Volume = (*Volume)(unsafe.Pointer(in.Volume))
+ out.DataVolumes = *(*[]DataVolume)(unsafe.Pointer(&in.DataVolumes))
+ out.KubeletDataVolumeName = (*string)(unsafe.Pointer(in.KubeletDataVolumeName))
+ out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones))
+ out.MachineControllerManagerSettings = (*MachineControllerManagerSettings)(unsafe.Pointer(in.MachineControllerManagerSettings))
+ return nil
+}
+
+// Convert_core_Worker_To_v1alpha1_Worker is an autogenerated conversion function.
+func Convert_core_Worker_To_v1alpha1_Worker(in *core.Worker, out *Worker, s conversion.Scope) error {
+ return autoConvert_core_Worker_To_v1alpha1_Worker(in, out, s)
+}
+
+func autoConvert_v1alpha1_WorkerKubernetes_To_core_WorkerKubernetes(in *WorkerKubernetes, out *core.WorkerKubernetes, s conversion.Scope) error {
+ out.Kubelet = (*core.KubeletConfig)(unsafe.Pointer(in.Kubelet))
+ return nil
+}
+
+// Convert_v1alpha1_WorkerKubernetes_To_core_WorkerKubernetes is an autogenerated conversion function.
+func Convert_v1alpha1_WorkerKubernetes_To_core_WorkerKubernetes(in *WorkerKubernetes, out *core.WorkerKubernetes, s conversion.Scope) error {
+ return autoConvert_v1alpha1_WorkerKubernetes_To_core_WorkerKubernetes(in, out, s)
+}
+
+func autoConvert_core_WorkerKubernetes_To_v1alpha1_WorkerKubernetes(in *core.WorkerKubernetes, out *WorkerKubernetes, s conversion.Scope) error {
+ out.Kubelet = (*KubeletConfig)(unsafe.Pointer(in.Kubelet))
+ return nil
+}
+
+// Convert_core_WorkerKubernetes_To_v1alpha1_WorkerKubernetes is an autogenerated conversion function.
+func Convert_core_WorkerKubernetes_To_v1alpha1_WorkerKubernetes(in *core.WorkerKubernetes, out *WorkerKubernetes, s conversion.Scope) error {
+ return autoConvert_core_WorkerKubernetes_To_v1alpha1_WorkerKubernetes(in, out, s)
+}
+
+func autoConvert_v1alpha1_WorkerSystemComponents_To_core_WorkerSystemComponents(in *WorkerSystemComponents, out *core.WorkerSystemComponents, s conversion.Scope) error {
+ out.Allow = in.Allow
+ return nil
+}
+
+// Convert_v1alpha1_WorkerSystemComponents_To_core_WorkerSystemComponents is an autogenerated conversion function.
+func Convert_v1alpha1_WorkerSystemComponents_To_core_WorkerSystemComponents(in *WorkerSystemComponents, out *core.WorkerSystemComponents, s conversion.Scope) error {
+ return autoConvert_v1alpha1_WorkerSystemComponents_To_core_WorkerSystemComponents(in, out, s)
+}
+
+func autoConvert_core_WorkerSystemComponents_To_v1alpha1_WorkerSystemComponents(in *core.WorkerSystemComponents, out *WorkerSystemComponents, s conversion.Scope) error {
+ out.Allow = in.Allow
+ return nil
+}
+
+// Convert_core_WorkerSystemComponents_To_v1alpha1_WorkerSystemComponents is an autogenerated conversion function.
+func Convert_core_WorkerSystemComponents_To_v1alpha1_WorkerSystemComponents(in *core.WorkerSystemComponents, out *WorkerSystemComponents, s conversion.Scope) error {
+ return autoConvert_core_WorkerSystemComponents_To_v1alpha1_WorkerSystemComponents(in, out, s)
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..133b376
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,4109 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ v1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Addon) DeepCopyInto(out *Addon) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon.
+func (in *Addon) DeepCopy() *Addon {
+ if in == nil {
+ return nil
+ }
+ out := new(Addon)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Addons) DeepCopyInto(out *Addons) {
+ *out = *in
+ if in.KubernetesDashboard != nil {
+ in, out := &in.KubernetesDashboard, &out.KubernetesDashboard
+ *out = new(KubernetesDashboard)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NginxIngress != nil {
+ in, out := &in.NginxIngress, &out.NginxIngress
+ *out = new(NginxIngress)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addons.
+func (in *Addons) DeepCopy() *Addons {
+ if in == nil {
+ return nil
+ }
+ out := new(Addons)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionPlugin) DeepCopyInto(out *AdmissionPlugin) {
+ *out = *in
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPlugin.
+func (in *AdmissionPlugin) DeepCopy() *AdmissionPlugin {
+ if in == nil {
+ return nil
+ }
+ out := new(AdmissionPlugin)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Alerting) DeepCopyInto(out *Alerting) {
+ *out = *in
+ if in.EmailReceivers != nil {
+ in, out := &in.EmailReceivers, &out.EmailReceivers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alerting.
+func (in *Alerting) DeepCopy() *Alerting {
+ if in == nil {
+ return nil
+ }
+ out := new(Alerting)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditConfig) DeepCopyInto(out *AuditConfig) {
+ *out = *in
+ if in.AuditPolicy != nil {
+ in, out := &in.AuditPolicy, &out.AuditPolicy
+ *out = new(AuditPolicy)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig.
+func (in *AuditConfig) DeepCopy() *AuditConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AuditConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditPolicy) DeepCopyInto(out *AuditPolicy) {
+ *out = *in
+ if in.ConfigMapRef != nil {
+ in, out := &in.ConfigMapRef, &out.ConfigMapRef
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditPolicy.
+func (in *AuditPolicy) DeepCopy() *AuditPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(AuditPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AvailabilityZone) DeepCopyInto(out *AvailabilityZone) {
+ *out = *in
+ if in.UnavailableMachineTypes != nil {
+ in, out := &in.UnavailableMachineTypes, &out.UnavailableMachineTypes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UnavailableVolumeTypes != nil {
+ in, out := &in.UnavailableVolumeTypes, &out.UnavailableVolumeTypes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailabilityZone.
+func (in *AvailabilityZone) DeepCopy() *AvailabilityZone {
+ if in == nil {
+ return nil
+ }
+ out := new(AvailabilityZone)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucket) DeepCopyInto(out *BackupBucket) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucket.
+func (in *BackupBucket) DeepCopy() *BackupBucket {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucket)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupBucket) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketList) DeepCopyInto(out *BackupBucketList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]BackupBucket, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketList.
+func (in *BackupBucketList) DeepCopy() *BackupBucketList {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupBucketList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketProvider) DeepCopyInto(out *BackupBucketProvider) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketProvider.
+func (in *BackupBucketProvider) DeepCopy() *BackupBucketProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketSpec) DeepCopyInto(out *BackupBucketSpec) {
+ *out = *in
+ out.Provider = in.Provider
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ out.SecretRef = in.SecretRef
+ if in.Seed != nil {
+ in, out := &in.Seed, &out.Seed
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketSpec.
+func (in *BackupBucketSpec) DeepCopy() *BackupBucketSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketStatus) DeepCopyInto(out *BackupBucketStatus) {
+ *out = *in
+ if in.ProviderStatus != nil {
+ in, out := &in.ProviderStatus, &out.ProviderStatus
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastOperation != nil {
+ in, out := &in.LastOperation, &out.LastOperation
+ *out = new(LastOperation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastError != nil {
+ in, out := &in.LastError, &out.LastError
+ *out = new(LastError)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GeneratedSecretRef != nil {
+ in, out := &in.GeneratedSecretRef, &out.GeneratedSecretRef
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketStatus.
+func (in *BackupBucketStatus) DeepCopy() *BackupBucketStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntry) DeepCopyInto(out *BackupEntry) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntry.
+func (in *BackupEntry) DeepCopy() *BackupEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupEntry) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntryList) DeepCopyInto(out *BackupEntryList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]BackupEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryList.
+func (in *BackupEntryList) DeepCopy() *BackupEntryList {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntryList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupEntryList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntrySpec) DeepCopyInto(out *BackupEntrySpec) {
+ *out = *in
+ if in.Seed != nil {
+ in, out := &in.Seed, &out.Seed
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntrySpec.
+func (in *BackupEntrySpec) DeepCopy() *BackupEntrySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntrySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntryStatus) DeepCopyInto(out *BackupEntryStatus) {
+ *out = *in
+ if in.LastOperation != nil {
+ in, out := &in.LastOperation, &out.LastOperation
+ *out = new(LastOperation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastError != nil {
+ in, out := &in.LastError, &out.LastError
+ *out = new(LastError)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryStatus.
+func (in *BackupEntryStatus) DeepCopy() *BackupEntryStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntryStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CRI) DeepCopyInto(out *CRI) {
+ *out = *in
+ if in.ContainerRuntimes != nil {
+ in, out := &in.ContainerRuntimes, &out.ContainerRuntimes
+ *out = make([]ContainerRuntime, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRI.
+func (in *CRI) DeepCopy() *CRI {
+ if in == nil {
+ return nil
+ }
+ out := new(CRI)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudInfo) DeepCopyInto(out *CloudInfo) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInfo.
+func (in *CloudInfo) DeepCopy() *CloudInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudProfile) DeepCopyInto(out *CloudProfile) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfile.
+func (in *CloudProfile) DeepCopy() *CloudProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CloudProfile) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudProfileList) DeepCopyInto(out *CloudProfileList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CloudProfile, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfileList.
+func (in *CloudProfileList) DeepCopy() *CloudProfileList {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudProfileList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CloudProfileList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudProfileSpec) DeepCopyInto(out *CloudProfileSpec) {
+ *out = *in
+ if in.CABundle != nil {
+ in, out := &in.CABundle, &out.CABundle
+ *out = new(string)
+ **out = **in
+ }
+ in.Kubernetes.DeepCopyInto(&out.Kubernetes)
+ if in.MachineImages != nil {
+ in, out := &in.MachineImages, &out.MachineImages
+ *out = make([]MachineImage, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.MachineTypes != nil {
+ in, out := &in.MachineTypes, &out.MachineTypes
+ *out = make([]MachineType, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Regions != nil {
+ in, out := &in.Regions, &out.Regions
+ *out = make([]Region, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SeedSelector != nil {
+ in, out := &in.SeedSelector, &out.SeedSelector
+ *out = new(SeedSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VolumeTypes != nil {
+ in, out := &in.VolumeTypes, &out.VolumeTypes
+ *out = make([]VolumeType, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfileSpec.
+func (in *CloudProfileSpec) DeepCopy() *CloudProfileSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudProfileSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterAutoscaler) DeepCopyInto(out *ClusterAutoscaler) {
+ *out = *in
+ if in.ScaleDownDelayAfterAdd != nil {
+ in, out := &in.ScaleDownDelayAfterAdd, &out.ScaleDownDelayAfterAdd
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ScaleDownDelayAfterDelete != nil {
+ in, out := &in.ScaleDownDelayAfterDelete, &out.ScaleDownDelayAfterDelete
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ScaleDownDelayAfterFailure != nil {
+ in, out := &in.ScaleDownDelayAfterFailure, &out.ScaleDownDelayAfterFailure
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ScaleDownUnneededTime != nil {
+ in, out := &in.ScaleDownUnneededTime, &out.ScaleDownUnneededTime
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ScaleDownUtilizationThreshold != nil {
+ in, out := &in.ScaleDownUtilizationThreshold, &out.ScaleDownUtilizationThreshold
+ *out = new(float64)
+ **out = **in
+ }
+ if in.ScanInterval != nil {
+ in, out := &in.ScanInterval, &out.ScanInterval
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscaler.
+func (in *ClusterAutoscaler) DeepCopy() *ClusterAutoscaler {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterAutoscaler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterInfo) DeepCopyInto(out *ClusterInfo) {
+ *out = *in
+ out.Cloud = in.Cloud
+ out.Kubernetes = in.Kubernetes
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInfo.
+func (in *ClusterInfo) DeepCopy() *ClusterInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Condition) DeepCopyInto(out *Condition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ if in.Codes != nil {
+ in, out := &in.Codes, &out.Codes
+ *out = make([]ErrorCode, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
+func (in *Condition) DeepCopy() *Condition {
+ if in == nil {
+ return nil
+ }
+ out := new(Condition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerRuntime) DeepCopyInto(out *ContainerRuntime) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRuntime.
+func (in *ContainerRuntime) DeepCopy() *ContainerRuntime {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerRuntime)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerDeployment) DeepCopyInto(out *ControllerDeployment) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Policy != nil {
+ in, out := &in.Policy, &out.Policy
+ *out = new(ControllerDeploymentPolicy)
+ **out = **in
+ }
+ if in.SeedSelector != nil {
+ in, out := &in.SeedSelector, &out.SeedSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerDeployment.
+func (in *ControllerDeployment) DeepCopy() *ControllerDeployment {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerDeployment)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallation) DeepCopyInto(out *ControllerInstallation) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallation.
+func (in *ControllerInstallation) DeepCopy() *ControllerInstallation {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerInstallation) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallationList) DeepCopyInto(out *ControllerInstallationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ControllerInstallation, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationList.
+func (in *ControllerInstallationList) DeepCopy() *ControllerInstallationList {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerInstallationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallationSpec) DeepCopyInto(out *ControllerInstallationSpec) {
+ *out = *in
+ out.RegistrationRef = in.RegistrationRef
+ out.SeedRef = in.SeedRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationSpec.
+func (in *ControllerInstallationSpec) DeepCopy() *ControllerInstallationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallationStatus) DeepCopyInto(out *ControllerInstallationStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ProviderStatus != nil {
+ in, out := &in.ProviderStatus, &out.ProviderStatus
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationStatus.
+func (in *ControllerInstallationStatus) DeepCopy() *ControllerInstallationStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallationStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRegistration) DeepCopyInto(out *ControllerRegistration) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistration.
+func (in *ControllerRegistration) DeepCopy() *ControllerRegistration {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRegistration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRegistration) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRegistrationList) DeepCopyInto(out *ControllerRegistrationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ControllerRegistration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistrationList.
+func (in *ControllerRegistrationList) DeepCopy() *ControllerRegistrationList {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRegistrationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRegistrationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRegistrationSpec) DeepCopyInto(out *ControllerRegistrationSpec) {
+ *out = *in
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]ControllerResource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Deployment != nil {
+ in, out := &in.Deployment, &out.Deployment
+ *out = new(ControllerDeployment)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistrationSpec.
+func (in *ControllerRegistrationSpec) DeepCopy() *ControllerRegistrationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRegistrationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerResource) DeepCopyInto(out *ControllerResource) {
+ *out = *in
+ if in.GloballyEnabled != nil {
+ in, out := &in.GloballyEnabled, &out.GloballyEnabled
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ReconcileTimeout != nil {
+ in, out := &in.ReconcileTimeout, &out.ReconcileTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.Primary != nil {
+ in, out := &in.Primary, &out.Primary
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerResource.
+func (in *ControllerResource) DeepCopy() *ControllerResource {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNS) DeepCopyInto(out *DNS) {
+ *out = *in
+ if in.Domain != nil {
+ in, out := &in.Domain, &out.Domain
+ *out = new(string)
+ **out = **in
+ }
+ if in.Providers != nil {
+ in, out := &in.Providers, &out.Providers
+ *out = make([]DNSProvider, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS.
+func (in *DNS) DeepCopy() *DNS {
+ if in == nil {
+ return nil
+ }
+ out := new(DNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSIncludeExclude) DeepCopyInto(out *DNSIncludeExclude) {
+ *out = *in
+ if in.Include != nil {
+ in, out := &in.Include, &out.Include
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Exclude != nil {
+ in, out := &in.Exclude, &out.Exclude
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSIncludeExclude.
+func (in *DNSIncludeExclude) DeepCopy() *DNSIncludeExclude {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSIncludeExclude)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSProvider) DeepCopyInto(out *DNSProvider) {
+ *out = *in
+ if in.Domains != nil {
+ in, out := &in.Domains, &out.Domains
+ *out = new(DNSIncludeExclude)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Primary != nil {
+ in, out := &in.Primary, &out.Primary
+ *out = new(bool)
+ **out = **in
+ }
+ if in.SecretName != nil {
+ in, out := &in.SecretName, &out.SecretName
+ *out = new(string)
+ **out = **in
+ }
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = new(DNSIncludeExclude)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProvider.
+func (in *DNSProvider) DeepCopy() *DNSProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DataVolume) DeepCopyInto(out *DataVolume) {
+ *out = *in
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ if in.Encrypted != nil {
+ in, out := &in.Encrypted, &out.Encrypted
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolume.
+func (in *DataVolume) DeepCopy() *DataVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(DataVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Endpoint) DeepCopyInto(out *Endpoint) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
+func (in *Endpoint) DeepCopy() *Endpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(Endpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExpirableVersion) DeepCopyInto(out *ExpirableVersion) {
+ *out = *in
+ if in.ExpirationDate != nil {
+ in, out := &in.ExpirationDate, &out.ExpirationDate
+ *out = (*in).DeepCopy()
+ }
+ if in.Classification != nil {
+ in, out := &in.Classification, &out.Classification
+ *out = new(VersionClassification)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpirableVersion.
+func (in *ExpirableVersion) DeepCopy() *ExpirableVersion {
+ if in == nil {
+ return nil
+ }
+ out := new(ExpirableVersion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Extension) DeepCopyInto(out *Extension) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Disabled != nil {
+ in, out := &in.Disabled, &out.Disabled
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extension.
+func (in *Extension) DeepCopy() *Extension {
+ if in == nil {
+ return nil
+ }
+ out := new(Extension)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtensionResourceState) DeepCopyInto(out *ExtensionResourceState) {
+ *out = *in
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ if in.Purpose != nil {
+ in, out := &in.Purpose, &out.Purpose
+ *out = new(string)
+ **out = **in
+ }
+ if in.State != nil {
+ in, out := &in.State, &out.State
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]v1beta1.NamedResourceReference, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionResourceState.
+func (in *ExtensionResourceState) DeepCopy() *ExtensionResourceState {
+ if in == nil {
+ return nil
+ }
+ out := new(ExtensionResourceState)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Gardener) DeepCopyInto(out *Gardener) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gardener.
+func (in *Gardener) DeepCopy() *Gardener {
+ if in == nil {
+ return nil
+ }
+ out := new(Gardener)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GardenerResourceData) DeepCopyInto(out *GardenerResourceData) {
+ *out = *in
+ in.Data.DeepCopyInto(&out.Data)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenerResourceData.
+func (in *GardenerResourceData) DeepCopy() *GardenerResourceData {
+ if in == nil {
+ return nil
+ }
+ out := new(GardenerResourceData)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Hibernation) DeepCopyInto(out *Hibernation) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Schedules != nil {
+ in, out := &in.Schedules, &out.Schedules
+ *out = make([]HibernationSchedule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hibernation.
+func (in *Hibernation) DeepCopy() *Hibernation {
+ if in == nil {
+ return nil
+ }
+ out := new(Hibernation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HibernationSchedule) DeepCopyInto(out *HibernationSchedule) {
+ *out = *in
+ if in.Start != nil {
+ in, out := &in.Start, &out.Start
+ *out = new(string)
+ **out = **in
+ }
+ if in.End != nil {
+ in, out := &in.End, &out.End
+ *out = new(string)
+ **out = **in
+ }
+ if in.Location != nil {
+ in, out := &in.Location, &out.Location
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HibernationSchedule.
+func (in *HibernationSchedule) DeepCopy() *HibernationSchedule {
+ if in == nil {
+ return nil
+ }
+ out := new(HibernationSchedule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerConfig) DeepCopyInto(out *HorizontalPodAutoscalerConfig) {
+ *out = *in
+ if in.CPUInitializationPeriod != nil {
+ in, out := &in.CPUInitializationPeriod, &out.CPUInitializationPeriod
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.DownscaleDelay != nil {
+ in, out := &in.DownscaleDelay, &out.DownscaleDelay
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.DownscaleStabilization != nil {
+ in, out := &in.DownscaleStabilization, &out.DownscaleStabilization
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.InitialReadinessDelay != nil {
+ in, out := &in.InitialReadinessDelay, &out.InitialReadinessDelay
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.SyncPeriod != nil {
+ in, out := &in.SyncPeriod, &out.SyncPeriod
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.Tolerance != nil {
+ in, out := &in.Tolerance, &out.Tolerance
+ *out = new(float64)
+ **out = **in
+ }
+ if in.UpscaleDelay != nil {
+ in, out := &in.UpscaleDelay, &out.UpscaleDelay
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerConfig.
+func (in *HorizontalPodAutoscalerConfig) DeepCopy() *HorizontalPodAutoscalerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(HorizontalPodAutoscalerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Ingress) DeepCopyInto(out *Ingress) {
+ *out = *in
+ in.Controller.DeepCopyInto(&out.Controller)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
+func (in *Ingress) DeepCopy() *Ingress {
+ if in == nil {
+ return nil
+ }
+ out := new(Ingress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressController) DeepCopyInto(out *IngressController) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController.
+func (in *IngressController) DeepCopy() *IngressController {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressController)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.AdmissionPlugins != nil {
+ in, out := &in.AdmissionPlugins, &out.AdmissionPlugins
+ *out = make([]AdmissionPlugin, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.APIAudiences != nil {
+ in, out := &in.APIAudiences, &out.APIAudiences
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.AuditConfig != nil {
+ in, out := &in.AuditConfig, &out.AuditConfig
+ *out = new(AuditConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EnableBasicAuthentication != nil {
+ in, out := &in.EnableBasicAuthentication, &out.EnableBasicAuthentication
+ *out = new(bool)
+ **out = **in
+ }
+ if in.OIDCConfig != nil {
+ in, out := &in.OIDCConfig, &out.OIDCConfig
+ *out = new(OIDCConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RuntimeConfig != nil {
+ in, out := &in.RuntimeConfig, &out.RuntimeConfig
+ *out = make(map[string]bool, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ServiceAccountConfig != nil {
+ in, out := &in.ServiceAccountConfig, &out.ServiceAccountConfig
+ *out = new(ServiceAccountConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.WatchCacheSizes != nil {
+ in, out := &in.WatchCacheSizes, &out.WatchCacheSizes
+ *out = new(WatchCacheSizes)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Requests != nil {
+ in, out := &in.Requests, &out.Requests
+ *out = new(KubeAPIServerRequests)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig.
+func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerRequests) DeepCopyInto(out *KubeAPIServerRequests) {
+ *out = *in
+ if in.MaxNonMutatingInflight != nil {
+ in, out := &in.MaxNonMutatingInflight, &out.MaxNonMutatingInflight
+ *out = new(int32)
+ **out = **in
+ }
+ if in.MaxMutatingInflight != nil {
+ in, out := &in.MaxMutatingInflight, &out.MaxMutatingInflight
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerRequests.
+func (in *KubeAPIServerRequests) DeepCopy() *KubeAPIServerRequests {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServerRequests)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.HorizontalPodAutoscalerConfig != nil {
+ in, out := &in.HorizontalPodAutoscalerConfig, &out.HorizontalPodAutoscalerConfig
+ *out = new(HorizontalPodAutoscalerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NodeCIDRMaskSize != nil {
+ in, out := &in.NodeCIDRMaskSize, &out.NodeCIDRMaskSize
+ *out = new(int32)
+ **out = **in
+ }
+ if in.PodEvictionTimeout != nil {
+ in, out := &in.PodEvictionTimeout, &out.PodEvictionTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerConfig.
+func (in *KubeControllerManagerConfig) DeepCopy() *KubeControllerManagerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeControllerManagerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeProxyConfig) DeepCopyInto(out *KubeProxyConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.Mode != nil {
+ in, out := &in.Mode, &out.Mode
+ *out = new(ProxyMode)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfig.
+func (in *KubeProxyConfig) DeepCopy() *KubeProxyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeProxyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.KubeMaxPDVols != nil {
+ in, out := &in.KubeMaxPDVols, &out.KubeMaxPDVols
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfig.
+func (in *KubeSchedulerConfig) DeepCopy() *KubeSchedulerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeSchedulerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfig) DeepCopyInto(out *KubeletConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.CPUCFSQuota != nil {
+ in, out := &in.CPUCFSQuota, &out.CPUCFSQuota
+ *out = new(bool)
+ **out = **in
+ }
+ if in.CPUManagerPolicy != nil {
+ in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy
+ *out = new(string)
+ **out = **in
+ }
+ if in.EvictionHard != nil {
+ in, out := &in.EvictionHard, &out.EvictionHard
+ *out = new(KubeletConfigEviction)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EvictionMaxPodGracePeriod != nil {
+ in, out := &in.EvictionMaxPodGracePeriod, &out.EvictionMaxPodGracePeriod
+ *out = new(int32)
+ **out = **in
+ }
+ if in.EvictionMinimumReclaim != nil {
+ in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim
+ *out = new(KubeletConfigEvictionMinimumReclaim)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EvictionPressureTransitionPeriod != nil {
+ in, out := &in.EvictionPressureTransitionPeriod, &out.EvictionPressureTransitionPeriod
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.EvictionSoft != nil {
+ in, out := &in.EvictionSoft, &out.EvictionSoft
+ *out = new(KubeletConfigEviction)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EvictionSoftGracePeriod != nil {
+ in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod
+ *out = new(KubeletConfigEvictionSoftGracePeriod)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.MaxPods != nil {
+ in, out := &in.MaxPods, &out.MaxPods
+ *out = new(int32)
+ **out = **in
+ }
+ if in.PodPIDsLimit != nil {
+ in, out := &in.PodPIDsLimit, &out.PodPIDsLimit
+ *out = new(int64)
+ **out = **in
+ }
+ if in.ImagePullProgressDeadline != nil {
+ in, out := &in.ImagePullProgressDeadline, &out.ImagePullProgressDeadline
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.FailSwapOn != nil {
+ in, out := &in.FailSwapOn, &out.FailSwapOn
+ *out = new(bool)
+ **out = **in
+ }
+ if in.KubeReserved != nil {
+ in, out := &in.KubeReserved, &out.KubeReserved
+ *out = new(KubeletConfigReserved)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SystemReserved != nil {
+ in, out := &in.SystemReserved, &out.SystemReserved
+ *out = new(KubeletConfigReserved)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfig.
+func (in *KubeletConfig) DeepCopy() *KubeletConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfigEviction) DeepCopyInto(out *KubeletConfigEviction) {
+ *out = *in
+ if in.MemoryAvailable != nil {
+ in, out := &in.MemoryAvailable, &out.MemoryAvailable
+ *out = new(string)
+ **out = **in
+ }
+ if in.ImageFSAvailable != nil {
+ in, out := &in.ImageFSAvailable, &out.ImageFSAvailable
+ *out = new(string)
+ **out = **in
+ }
+ if in.ImageFSInodesFree != nil {
+ in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree
+ *out = new(string)
+ **out = **in
+ }
+ if in.NodeFSAvailable != nil {
+ in, out := &in.NodeFSAvailable, &out.NodeFSAvailable
+ *out = new(string)
+ **out = **in
+ }
+ if in.NodeFSInodesFree != nil {
+ in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEviction.
+func (in *KubeletConfigEviction) DeepCopy() *KubeletConfigEviction {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfigEviction)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfigEvictionMinimumReclaim) DeepCopyInto(out *KubeletConfigEvictionMinimumReclaim) {
+ *out = *in
+ if in.MemoryAvailable != nil {
+ in, out := &in.MemoryAvailable, &out.MemoryAvailable
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.ImageFSAvailable != nil {
+ in, out := &in.ImageFSAvailable, &out.ImageFSAvailable
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.ImageFSInodesFree != nil {
+ in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.NodeFSAvailable != nil {
+ in, out := &in.NodeFSAvailable, &out.NodeFSAvailable
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.NodeFSInodesFree != nil {
+ in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEvictionMinimumReclaim.
+func (in *KubeletConfigEvictionMinimumReclaim) DeepCopy() *KubeletConfigEvictionMinimumReclaim {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfigEvictionMinimumReclaim)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopyInto(out *KubeletConfigEvictionSoftGracePeriod) {
+ *out = *in
+ if in.MemoryAvailable != nil {
+ in, out := &in.MemoryAvailable, &out.MemoryAvailable
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ImageFSAvailable != nil {
+ in, out := &in.ImageFSAvailable, &out.ImageFSAvailable
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ImageFSInodesFree != nil {
+ in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.NodeFSAvailable != nil {
+ in, out := &in.NodeFSAvailable, &out.NodeFSAvailable
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.NodeFSInodesFree != nil {
+ in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEvictionSoftGracePeriod.
+func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopy() *KubeletConfigEvictionSoftGracePeriod {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfigEvictionSoftGracePeriod)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfigReserved) DeepCopyInto(out *KubeletConfigReserved) {
+ *out = *in
+ if in.CPU != nil {
+ in, out := &in.CPU, &out.CPU
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.Memory != nil {
+ in, out := &in.Memory, &out.Memory
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.EphemeralStorage != nil {
+ in, out := &in.EphemeralStorage, &out.EphemeralStorage
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.PID != nil {
+ in, out := &in.PID, &out.PID
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigReserved.
+func (in *KubeletConfigReserved) DeepCopy() *KubeletConfigReserved {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfigReserved)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Kubernetes) DeepCopyInto(out *Kubernetes) {
+ *out = *in
+ if in.AllowPrivilegedContainers != nil {
+ in, out := &in.AllowPrivilegedContainers, &out.AllowPrivilegedContainers
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ClusterAutoscaler != nil {
+ in, out := &in.ClusterAutoscaler, &out.ClusterAutoscaler
+ *out = new(ClusterAutoscaler)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeAPIServer != nil {
+ in, out := &in.KubeAPIServer, &out.KubeAPIServer
+ *out = new(KubeAPIServerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeControllerManager != nil {
+ in, out := &in.KubeControllerManager, &out.KubeControllerManager
+ *out = new(KubeControllerManagerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeScheduler != nil {
+ in, out := &in.KubeScheduler, &out.KubeScheduler
+ *out = new(KubeSchedulerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeProxy != nil {
+ in, out := &in.KubeProxy, &out.KubeProxy
+ *out = new(KubeProxyConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Kubelet != nil {
+ in, out := &in.Kubelet, &out.Kubelet
+ *out = new(KubeletConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VerticalPodAutoscaler != nil {
+ in, out := &in.VerticalPodAutoscaler, &out.VerticalPodAutoscaler
+ *out = new(VerticalPodAutoscaler)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kubernetes.
+func (in *Kubernetes) DeepCopy() *Kubernetes {
+ if in == nil {
+ return nil
+ }
+ out := new(Kubernetes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesConfig) DeepCopyInto(out *KubernetesConfig) {
+ *out = *in
+ if in.FeatureGates != nil {
+ in, out := &in.FeatureGates, &out.FeatureGates
+ *out = make(map[string]bool, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesConfig.
+func (in *KubernetesConfig) DeepCopy() *KubernetesConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesDashboard) DeepCopyInto(out *KubernetesDashboard) {
+ *out = *in
+ out.Addon = in.Addon
+ if in.AuthenticationMode != nil {
+ in, out := &in.AuthenticationMode, &out.AuthenticationMode
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesDashboard.
+func (in *KubernetesDashboard) DeepCopy() *KubernetesDashboard {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesDashboard)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesInfo) DeepCopyInto(out *KubernetesInfo) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesInfo.
+func (in *KubernetesInfo) DeepCopy() *KubernetesInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesSettings) DeepCopyInto(out *KubernetesSettings) {
+ *out = *in
+ if in.Versions != nil {
+ in, out := &in.Versions, &out.Versions
+ *out = make([]ExpirableVersion, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSettings.
+func (in *KubernetesSettings) DeepCopy() *KubernetesSettings {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesSettings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LastError) DeepCopyInto(out *LastError) {
+ *out = *in
+ if in.TaskID != nil {
+ in, out := &in.TaskID, &out.TaskID
+ *out = new(string)
+ **out = **in
+ }
+ if in.Codes != nil {
+ in, out := &in.Codes, &out.Codes
+ *out = make([]ErrorCode, len(*in))
+ copy(*out, *in)
+ }
+ if in.LastUpdateTime != nil {
+ in, out := &in.LastUpdateTime, &out.LastUpdateTime
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastError.
+func (in *LastError) DeepCopy() *LastError {
+ if in == nil {
+ return nil
+ }
+ out := new(LastError)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LastOperation) DeepCopyInto(out *LastOperation) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation.
+func (in *LastOperation) DeepCopy() *LastOperation {
+ if in == nil {
+ return nil
+ }
+ out := new(LastOperation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Machine) DeepCopyInto(out *Machine) {
+ *out = *in
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(ShootMachineImage)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine.
+func (in *Machine) DeepCopy() *Machine {
+ if in == nil {
+ return nil
+ }
+ out := new(Machine)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineControllerManagerSettings) DeepCopyInto(out *MachineControllerManagerSettings) {
+ *out = *in
+ if in.MachineDrainTimeout != nil {
+ in, out := &in.MachineDrainTimeout, &out.MachineDrainTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.MachineHealthTimeout != nil {
+ in, out := &in.MachineHealthTimeout, &out.MachineHealthTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.MachineCreationTimeout != nil {
+ in, out := &in.MachineCreationTimeout, &out.MachineCreationTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.MaxEvictRetries != nil {
+ in, out := &in.MaxEvictRetries, &out.MaxEvictRetries
+ *out = new(int32)
+ **out = **in
+ }
+ if in.NodeConditions != nil {
+ in, out := &in.NodeConditions, &out.NodeConditions
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineControllerManagerSettings.
+func (in *MachineControllerManagerSettings) DeepCopy() *MachineControllerManagerSettings {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineControllerManagerSettings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineImage) DeepCopyInto(out *MachineImage) {
+ *out = *in
+ if in.Versions != nil {
+ in, out := &in.Versions, &out.Versions
+ *out = make([]MachineImageVersion, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImage.
+func (in *MachineImage) DeepCopy() *MachineImage {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineImage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineImageVersion) DeepCopyInto(out *MachineImageVersion) {
+ *out = *in
+ in.ExpirableVersion.DeepCopyInto(&out.ExpirableVersion)
+ if in.CRI != nil {
+ in, out := &in.CRI, &out.CRI
+ *out = make([]CRI, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImageVersion.
+func (in *MachineImageVersion) DeepCopy() *MachineImageVersion {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineImageVersion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineType) DeepCopyInto(out *MachineType) {
+ *out = *in
+ out.CPU = in.CPU.DeepCopy()
+ out.GPU = in.GPU.DeepCopy()
+ out.Memory = in.Memory.DeepCopy()
+ if in.Storage != nil {
+ in, out := &in.Storage, &out.Storage
+ *out = new(MachineTypeStorage)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Usable != nil {
+ in, out := &in.Usable, &out.Usable
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineType.
+func (in *MachineType) DeepCopy() *MachineType {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineType)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineTypeStorage) DeepCopyInto(out *MachineTypeStorage) {
+ *out = *in
+ out.StorageSize = in.StorageSize.DeepCopy()
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTypeStorage.
+func (in *MachineTypeStorage) DeepCopy() *MachineTypeStorage {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineTypeStorage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Maintenance) DeepCopyInto(out *Maintenance) {
+ *out = *in
+ if in.AutoUpdate != nil {
+ in, out := &in.AutoUpdate, &out.AutoUpdate
+ *out = new(MaintenanceAutoUpdate)
+ **out = **in
+ }
+ if in.TimeWindow != nil {
+ in, out := &in.TimeWindow, &out.TimeWindow
+ *out = new(MaintenanceTimeWindow)
+ **out = **in
+ }
+ if in.ConfineSpecUpdateRollout != nil {
+ in, out := &in.ConfineSpecUpdateRollout, &out.ConfineSpecUpdateRollout
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintenance.
+func (in *Maintenance) DeepCopy() *Maintenance {
+ if in == nil {
+ return nil
+ }
+ out := new(Maintenance)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MaintenanceAutoUpdate) DeepCopyInto(out *MaintenanceAutoUpdate) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceAutoUpdate.
+func (in *MaintenanceAutoUpdate) DeepCopy() *MaintenanceAutoUpdate {
+ if in == nil {
+ return nil
+ }
+ out := new(MaintenanceAutoUpdate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MaintenanceTimeWindow) DeepCopyInto(out *MaintenanceTimeWindow) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceTimeWindow.
+func (in *MaintenanceTimeWindow) DeepCopy() *MaintenanceTimeWindow {
+ if in == nil {
+ return nil
+ }
+ out := new(MaintenanceTimeWindow)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Monitoring) DeepCopyInto(out *Monitoring) {
+ *out = *in
+ if in.Alerting != nil {
+ in, out := &in.Alerting, &out.Alerting
+ *out = new(Alerting)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Monitoring.
+func (in *Monitoring) DeepCopy() *Monitoring {
+ if in == nil {
+ return nil
+ }
+ out := new(Monitoring)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedResourceReference) DeepCopyInto(out *NamedResourceReference) {
+ *out = *in
+ out.ResourceRef = in.ResourceRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourceReference.
+func (in *NamedResourceReference) DeepCopy() *NamedResourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedResourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Networking) DeepCopyInto(out *Networking) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Pods != nil {
+ in, out := &in.Pods, &out.Pods
+ *out = new(string)
+ **out = **in
+ }
+ if in.Nodes != nil {
+ in, out := &in.Nodes, &out.Nodes
+ *out = new(string)
+ **out = **in
+ }
+ if in.Services != nil {
+ in, out := &in.Services, &out.Services
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking.
+func (in *Networking) DeepCopy() *Networking {
+ if in == nil {
+ return nil
+ }
+ out := new(Networking)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NginxIngress) DeepCopyInto(out *NginxIngress) {
+ *out = *in
+ out.Addon = in.Addon
+ if in.LoadBalancerSourceRanges != nil {
+ in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ExternalTrafficPolicy != nil {
+ in, out := &in.ExternalTrafficPolicy, &out.ExternalTrafficPolicy
+ *out = new(v1.ServiceExternalTrafficPolicyType)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxIngress.
+func (in *NginxIngress) DeepCopy() *NginxIngress {
+ if in == nil {
+ return nil
+ }
+ out := new(NginxIngress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OIDCConfig) DeepCopyInto(out *OIDCConfig) {
+ *out = *in
+ if in.CABundle != nil {
+ in, out := &in.CABundle, &out.CABundle
+ *out = new(string)
+ **out = **in
+ }
+ if in.ClientAuthentication != nil {
+ in, out := &in.ClientAuthentication, &out.ClientAuthentication
+ *out = new(OpenIDConnectClientAuthentication)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ClientID != nil {
+ in, out := &in.ClientID, &out.ClientID
+ *out = new(string)
+ **out = **in
+ }
+ if in.GroupsClaim != nil {
+ in, out := &in.GroupsClaim, &out.GroupsClaim
+ *out = new(string)
+ **out = **in
+ }
+ if in.GroupsPrefix != nil {
+ in, out := &in.GroupsPrefix, &out.GroupsPrefix
+ *out = new(string)
+ **out = **in
+ }
+ if in.IssuerURL != nil {
+ in, out := &in.IssuerURL, &out.IssuerURL
+ *out = new(string)
+ **out = **in
+ }
+ if in.RequiredClaims != nil {
+ in, out := &in.RequiredClaims, &out.RequiredClaims
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.SigningAlgs != nil {
+ in, out := &in.SigningAlgs, &out.SigningAlgs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UsernameClaim != nil {
+ in, out := &in.UsernameClaim, &out.UsernameClaim
+ *out = new(string)
+ **out = **in
+ }
+ if in.UsernamePrefix != nil {
+ in, out := &in.UsernamePrefix, &out.UsernamePrefix
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCConfig.
+func (in *OIDCConfig) DeepCopy() *OIDCConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OIDCConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDConnectClientAuthentication) DeepCopyInto(out *OpenIDConnectClientAuthentication) {
+ *out = *in
+ if in.ExtraConfig != nil {
+ in, out := &in.ExtraConfig, &out.ExtraConfig
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Secret != nil {
+ in, out := &in.Secret, &out.Secret
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectClientAuthentication.
+func (in *OpenIDConnectClientAuthentication) DeepCopy() *OpenIDConnectClientAuthentication {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDConnectClientAuthentication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Plant) DeepCopyInto(out *Plant) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plant.
+func (in *Plant) DeepCopy() *Plant {
+ if in == nil {
+ return nil
+ }
+ out := new(Plant)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Plant) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlantList) DeepCopyInto(out *PlantList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Plant, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantList.
+func (in *PlantList) DeepCopy() *PlantList {
+ if in == nil {
+ return nil
+ }
+ out := new(PlantList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PlantList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlantSpec) DeepCopyInto(out *PlantSpec) {
+ *out = *in
+ out.SecretRef = in.SecretRef
+ if in.Endpoints != nil {
+ in, out := &in.Endpoints, &out.Endpoints
+ *out = make([]Endpoint, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantSpec.
+func (in *PlantSpec) DeepCopy() *PlantSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PlantSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlantStatus) DeepCopyInto(out *PlantStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ObservedGeneration != nil {
+ in, out := &in.ObservedGeneration, &out.ObservedGeneration
+ *out = new(int64)
+ **out = **in
+ }
+ if in.ClusterInfo != nil {
+ in, out := &in.ClusterInfo, &out.ClusterInfo
+ *out = new(ClusterInfo)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantStatus.
+func (in *PlantStatus) DeepCopy() *PlantStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PlantStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Project) DeepCopyInto(out *Project) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project.
+func (in *Project) DeepCopy() *Project {
+ if in == nil {
+ return nil
+ }
+ out := new(Project)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Project) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectList) DeepCopyInto(out *ProjectList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Project, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList.
+func (in *ProjectList) DeepCopy() *ProjectList {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProjectList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectMember) DeepCopyInto(out *ProjectMember) {
+ *out = *in
+ out.Subject = in.Subject
+ if in.Roles != nil {
+ in, out := &in.Roles, &out.Roles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectMember.
+func (in *ProjectMember) DeepCopy() *ProjectMember {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectMember)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
+ *out = *in
+ if in.CreatedBy != nil {
+ in, out := &in.CreatedBy, &out.CreatedBy
+ *out = new(rbacv1.Subject)
+ **out = **in
+ }
+ if in.Description != nil {
+ in, out := &in.Description, &out.Description
+ *out = new(string)
+ **out = **in
+ }
+ if in.Owner != nil {
+ in, out := &in.Owner, &out.Owner
+ *out = new(rbacv1.Subject)
+ **out = **in
+ }
+ if in.Purpose != nil {
+ in, out := &in.Purpose, &out.Purpose
+ *out = new(string)
+ **out = **in
+ }
+ if in.Members != nil {
+ in, out := &in.Members, &out.Members
+ *out = make([]ProjectMember, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Namespace != nil {
+ in, out := &in.Namespace, &out.Namespace
+ *out = new(string)
+ **out = **in
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = new(ProjectTolerations)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec.
+func (in *ProjectSpec) DeepCopy() *ProjectSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) {
+ *out = *in
+ if in.StaleSinceTimestamp != nil {
+ in, out := &in.StaleSinceTimestamp, &out.StaleSinceTimestamp
+ *out = (*in).DeepCopy()
+ }
+ if in.StaleAutoDeleteTimestamp != nil {
+ in, out := &in.StaleAutoDeleteTimestamp, &out.StaleAutoDeleteTimestamp
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus.
+func (in *ProjectStatus) DeepCopy() *ProjectStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectTolerations) DeepCopyInto(out *ProjectTolerations) {
+ *out = *in
+ if in.Defaults != nil {
+ in, out := &in.Defaults, &out.Defaults
+ *out = make([]Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Whitelist != nil {
+ in, out := &in.Whitelist, &out.Whitelist
+ *out = make([]Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectTolerations.
+func (in *ProjectTolerations) DeepCopy() *ProjectTolerations {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectTolerations)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Provider) DeepCopyInto(out *Provider) {
+ *out = *in
+ if in.ControlPlaneConfig != nil {
+ in, out := &in.ControlPlaneConfig, &out.ControlPlaneConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.InfrastructureConfig != nil {
+ in, out := &in.InfrastructureConfig, &out.InfrastructureConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Workers != nil {
+ in, out := &in.Workers, &out.Workers
+ *out = make([]Worker, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provider.
+func (in *Provider) DeepCopy() *Provider {
+ if in == nil {
+ return nil
+ }
+ out := new(Provider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Quota) DeepCopyInto(out *Quota) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Quota.
+func (in *Quota) DeepCopy() *Quota {
+ if in == nil {
+ return nil
+ }
+ out := new(Quota)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Quota) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuotaList) DeepCopyInto(out *QuotaList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Quota, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaList.
+func (in *QuotaList) DeepCopy() *QuotaList {
+ if in == nil {
+ return nil
+ }
+ out := new(QuotaList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *QuotaList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuotaSpec) DeepCopyInto(out *QuotaSpec) {
+ *out = *in
+ if in.ClusterLifetimeDays != nil {
+ in, out := &in.ClusterLifetimeDays, &out.ClusterLifetimeDays
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Metrics != nil {
+ in, out := &in.Metrics, &out.Metrics
+ *out = make(v1.ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+ out.Scope = in.Scope
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaSpec.
+func (in *QuotaSpec) DeepCopy() *QuotaSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(QuotaSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Region) DeepCopyInto(out *Region) {
+ *out = *in
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = make([]AvailabilityZone, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Region.
+func (in *Region) DeepCopy() *Region {
+ if in == nil {
+ return nil
+ }
+ out := new(Region)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceData) DeepCopyInto(out *ResourceData) {
+ *out = *in
+ out.CrossVersionObjectReference = in.CrossVersionObjectReference
+ in.Data.DeepCopyInto(&out.Data)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceData.
+func (in *ResourceData) DeepCopy() *ResourceData {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceData)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceWatchCacheSize) DeepCopyInto(out *ResourceWatchCacheSize) {
+ *out = *in
+ if in.APIGroup != nil {
+ in, out := &in.APIGroup, &out.APIGroup
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceWatchCacheSize.
+func (in *ResourceWatchCacheSize) DeepCopy() *ResourceWatchCacheSize {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceWatchCacheSize)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretBinding) DeepCopyInto(out *SecretBinding) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.SecretRef = in.SecretRef
+ if in.Quotas != nil {
+ in, out := &in.Quotas, &out.Quotas
+ *out = make([]v1.ObjectReference, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBinding.
+func (in *SecretBinding) DeepCopy() *SecretBinding {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretBinding)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecretBinding) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretBindingList) DeepCopyInto(out *SecretBindingList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]SecretBinding, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBindingList.
+func (in *SecretBindingList) DeepCopy() *SecretBindingList {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretBindingList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecretBindingList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Seed) DeepCopyInto(out *Seed) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Seed.
+func (in *Seed) DeepCopy() *Seed {
+ if in == nil {
+ return nil
+ }
+ out := new(Seed)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Seed) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedBackup) DeepCopyInto(out *SeedBackup) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Region != nil {
+ in, out := &in.Region, &out.Region
+ *out = new(string)
+ **out = **in
+ }
+ out.SecretRef = in.SecretRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedBackup.
+func (in *SeedBackup) DeepCopy() *SeedBackup {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedBackup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedDNS) DeepCopyInto(out *SeedDNS) {
+ *out = *in
+ if in.IngressDomain != nil {
+ in, out := &in.IngressDomain, &out.IngressDomain
+ *out = new(string)
+ **out = **in
+ }
+ if in.Provider != nil {
+ in, out := &in.Provider, &out.Provider
+ *out = new(SeedDNSProvider)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNS.
+func (in *SeedDNS) DeepCopy() *SeedDNS {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedDNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedDNSProvider) DeepCopyInto(out *SeedDNSProvider) {
+ *out = *in
+ out.SecretRef = in.SecretRef
+ if in.Domains != nil {
+ in, out := &in.Domains, &out.Domains
+ *out = new(DNSIncludeExclude)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = new(DNSIncludeExclude)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNSProvider.
+func (in *SeedDNSProvider) DeepCopy() *SeedDNSProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedDNSProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedList) DeepCopyInto(out *SeedList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Seed, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedList.
+func (in *SeedList) DeepCopy() *SeedList {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SeedList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedNetworks) DeepCopyInto(out *SeedNetworks) {
+ *out = *in
+ if in.Nodes != nil {
+ in, out := &in.Nodes, &out.Nodes
+ *out = new(string)
+ **out = **in
+ }
+ if in.ShootDefaults != nil {
+ in, out := &in.ShootDefaults, &out.ShootDefaults
+ *out = new(ShootNetworks)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedNetworks.
+func (in *SeedNetworks) DeepCopy() *SeedNetworks {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedNetworks)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedProvider) DeepCopyInto(out *SeedProvider) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedProvider.
+func (in *SeedProvider) DeepCopy() *SeedProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSelector) DeepCopyInto(out *SeedSelector) {
+ *out = *in
+ if in.LabelSelector != nil {
+ in, out := &in.LabelSelector, &out.LabelSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ProviderTypes != nil {
+ in, out := &in.ProviderTypes, &out.ProviderTypes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSelector.
+func (in *SeedSelector) DeepCopy() *SeedSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingExcessCapacityReservation) DeepCopyInto(out *SeedSettingExcessCapacityReservation) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingExcessCapacityReservation.
+func (in *SeedSettingExcessCapacityReservation) DeepCopy() *SeedSettingExcessCapacityReservation {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingExcessCapacityReservation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingLoadBalancerServices) DeepCopyInto(out *SeedSettingLoadBalancerServices) {
+ *out = *in
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingLoadBalancerServices.
+func (in *SeedSettingLoadBalancerServices) DeepCopy() *SeedSettingLoadBalancerServices {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingLoadBalancerServices)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingScheduling) DeepCopyInto(out *SeedSettingScheduling) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingScheduling.
+func (in *SeedSettingScheduling) DeepCopy() *SeedSettingScheduling {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingScheduling)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingShootDNS) DeepCopyInto(out *SeedSettingShootDNS) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingShootDNS.
+func (in *SeedSettingShootDNS) DeepCopy() *SeedSettingShootDNS {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingShootDNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingVerticalPodAutoscaler) DeepCopyInto(out *SeedSettingVerticalPodAutoscaler) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingVerticalPodAutoscaler.
+func (in *SeedSettingVerticalPodAutoscaler) DeepCopy() *SeedSettingVerticalPodAutoscaler {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingVerticalPodAutoscaler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettings) DeepCopyInto(out *SeedSettings) {
+ *out = *in
+ if in.ExcessCapacityReservation != nil {
+ in, out := &in.ExcessCapacityReservation, &out.ExcessCapacityReservation
+ *out = new(SeedSettingExcessCapacityReservation)
+ **out = **in
+ }
+ if in.Scheduling != nil {
+ in, out := &in.Scheduling, &out.Scheduling
+ *out = new(SeedSettingScheduling)
+ **out = **in
+ }
+ if in.ShootDNS != nil {
+ in, out := &in.ShootDNS, &out.ShootDNS
+ *out = new(SeedSettingShootDNS)
+ **out = **in
+ }
+ if in.LoadBalancerServices != nil {
+ in, out := &in.LoadBalancerServices, &out.LoadBalancerServices
+ *out = new(SeedSettingLoadBalancerServices)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VerticalPodAutoscaler != nil {
+ in, out := &in.VerticalPodAutoscaler, &out.VerticalPodAutoscaler
+ *out = new(SeedSettingVerticalPodAutoscaler)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettings.
+func (in *SeedSettings) DeepCopy() *SeedSettings {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSpec) DeepCopyInto(out *SeedSpec) {
+ *out = *in
+ if in.Backup != nil {
+ in, out := &in.Backup, &out.Backup
+ *out = new(SeedBackup)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.BlockCIDRs != nil {
+ in, out := &in.BlockCIDRs, &out.BlockCIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.DNS.DeepCopyInto(&out.DNS)
+ in.Networks.DeepCopyInto(&out.Networks)
+ in.Provider.DeepCopyInto(&out.Provider)
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ if in.Taints != nil {
+ in, out := &in.Taints, &out.Taints
+ *out = make([]SeedTaint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Volume != nil {
+ in, out := &in.Volume, &out.Volume
+ *out = new(SeedVolume)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Settings != nil {
+ in, out := &in.Settings, &out.Settings
+ *out = new(SeedSettings)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = new(Ingress)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSpec.
+func (in *SeedSpec) DeepCopy() *SeedSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedStatus) DeepCopyInto(out *SeedStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Gardener != nil {
+ in, out := &in.Gardener, &out.Gardener
+ *out = new(Gardener)
+ **out = **in
+ }
+ if in.KubernetesVersion != nil {
+ in, out := &in.KubernetesVersion, &out.KubernetesVersion
+ *out = new(string)
+ **out = **in
+ }
+ if in.ClusterIdentity != nil {
+ in, out := &in.ClusterIdentity, &out.ClusterIdentity
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedStatus.
+func (in *SeedStatus) DeepCopy() *SeedStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedTaint) DeepCopyInto(out *SeedTaint) {
+ *out = *in
+ if in.Value != nil {
+ in, out := &in.Value, &out.Value
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedTaint.
+func (in *SeedTaint) DeepCopy() *SeedTaint {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedTaint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedVolume) DeepCopyInto(out *SeedVolume) {
+ *out = *in
+ if in.MinimumSize != nil {
+ in, out := &in.MinimumSize, &out.MinimumSize
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.Providers != nil {
+ in, out := &in.Providers, &out.Providers
+ *out = make([]SeedVolumeProvider, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedVolume.
+func (in *SeedVolume) DeepCopy() *SeedVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedVolumeProvider) DeepCopyInto(out *SeedVolumeProvider) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedVolumeProvider.
+func (in *SeedVolumeProvider) DeepCopy() *SeedVolumeProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedVolumeProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountConfig) DeepCopyInto(out *ServiceAccountConfig) {
+ *out = *in
+ if in.Issuer != nil {
+ in, out := &in.Issuer, &out.Issuer
+ *out = new(string)
+ **out = **in
+ }
+ if in.SigningKeySecret != nil {
+ in, out := &in.SigningKeySecret, &out.SigningKeySecret
+ *out = new(v1.LocalObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountConfig.
+func (in *ServiceAccountConfig) DeepCopy() *ServiceAccountConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceAccountConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Shoot) DeepCopyInto(out *Shoot) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Shoot.
+func (in *Shoot) DeepCopy() *Shoot {
+ if in == nil {
+ return nil
+ }
+ out := new(Shoot)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Shoot) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootList) DeepCopyInto(out *ShootList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Shoot, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootList.
+func (in *ShootList) DeepCopy() *ShootList {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ShootList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootMachineImage) DeepCopyInto(out *ShootMachineImage) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Version != nil {
+ in, out := &in.Version, &out.Version
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootMachineImage.
+func (in *ShootMachineImage) DeepCopy() *ShootMachineImage {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootMachineImage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootNetworks) DeepCopyInto(out *ShootNetworks) {
+ *out = *in
+ if in.Pods != nil {
+ in, out := &in.Pods, &out.Pods
+ *out = new(string)
+ **out = **in
+ }
+ if in.Services != nil {
+ in, out := &in.Services, &out.Services
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootNetworks.
+func (in *ShootNetworks) DeepCopy() *ShootNetworks {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootNetworks)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootSpec) DeepCopyInto(out *ShootSpec) {
+ *out = *in
+ if in.Addons != nil {
+ in, out := &in.Addons, &out.Addons
+ *out = new(Addons)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DNS != nil {
+ in, out := &in.DNS, &out.DNS
+ *out = new(DNS)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Extensions != nil {
+ in, out := &in.Extensions, &out.Extensions
+ *out = make([]Extension, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Hibernation != nil {
+ in, out := &in.Hibernation, &out.Hibernation
+ *out = new(Hibernation)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Kubernetes.DeepCopyInto(&out.Kubernetes)
+ in.Networking.DeepCopyInto(&out.Networking)
+ if in.Maintenance != nil {
+ in, out := &in.Maintenance, &out.Maintenance
+ *out = new(Maintenance)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Monitoring != nil {
+ in, out := &in.Monitoring, &out.Monitoring
+ *out = new(Monitoring)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Provider.DeepCopyInto(&out.Provider)
+ if in.Purpose != nil {
+ in, out := &in.Purpose, &out.Purpose
+ *out = new(ShootPurpose)
+ **out = **in
+ }
+ if in.SeedName != nil {
+ in, out := &in.SeedName, &out.SeedName
+ *out = new(string)
+ **out = **in
+ }
+ if in.SeedSelector != nil {
+ in, out := &in.SeedSelector, &out.SeedSelector
+ *out = new(SeedSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]NamedResourceReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootSpec.
+func (in *ShootSpec) DeepCopy() *ShootSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootState) DeepCopyInto(out *ShootState) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootState.
+func (in *ShootState) DeepCopy() *ShootState {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootState)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ShootState) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootStateList) DeepCopyInto(out *ShootStateList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ShootState, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStateList.
+func (in *ShootStateList) DeepCopy() *ShootStateList {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootStateList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ShootStateList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootStateSpec) DeepCopyInto(out *ShootStateSpec) {
+ *out = *in
+ if in.Gardener != nil {
+ in, out := &in.Gardener, &out.Gardener
+ *out = make([]GardenerResourceData, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Extensions != nil {
+ in, out := &in.Extensions, &out.Extensions
+ *out = make([]ExtensionResourceState, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]ResourceData, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStateSpec.
+func (in *ShootStateSpec) DeepCopy() *ShootStateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootStateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootStatus) DeepCopyInto(out *ShootStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Constraints != nil {
+ in, out := &in.Constraints, &out.Constraints
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ out.Gardener = in.Gardener
+ if in.LastOperation != nil {
+ in, out := &in.LastOperation, &out.LastOperation
+ *out = new(LastOperation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastError != nil {
+ in, out := &in.LastError, &out.LastError
+ *out = new(LastError)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastErrors != nil {
+ in, out := &in.LastErrors, &out.LastErrors
+ *out = make([]LastError, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.RetryCycleStartTime != nil {
+ in, out := &in.RetryCycleStartTime, &out.RetryCycleStartTime
+ *out = (*in).DeepCopy()
+ }
+ if in.Seed != nil {
+ in, out := &in.Seed, &out.Seed
+ *out = new(string)
+ **out = **in
+ }
+ if in.ClusterIdentity != nil {
+ in, out := &in.ClusterIdentity, &out.ClusterIdentity
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStatus.
+func (in *ShootStatus) DeepCopy() *ShootStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Toleration) DeepCopyInto(out *Toleration) {
+ *out = *in
+ if in.Value != nil {
+ in, out := &in.Value, &out.Value
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration.
+func (in *Toleration) DeepCopy() *Toleration {
+ if in == nil {
+ return nil
+ }
+ out := new(Toleration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VerticalPodAutoscaler) DeepCopyInto(out *VerticalPodAutoscaler) {
+ *out = *in
+ if in.EvictAfterOOMThreshold != nil {
+ in, out := &in.EvictAfterOOMThreshold, &out.EvictAfterOOMThreshold
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.EvictionRateBurst != nil {
+ in, out := &in.EvictionRateBurst, &out.EvictionRateBurst
+ *out = new(int32)
+ **out = **in
+ }
+ if in.EvictionRateLimit != nil {
+ in, out := &in.EvictionRateLimit, &out.EvictionRateLimit
+ *out = new(float64)
+ **out = **in
+ }
+ if in.EvictionTolerance != nil {
+ in, out := &in.EvictionTolerance, &out.EvictionTolerance
+ *out = new(float64)
+ **out = **in
+ }
+ if in.RecommendationMarginFraction != nil {
+ in, out := &in.RecommendationMarginFraction, &out.RecommendationMarginFraction
+ *out = new(float64)
+ **out = **in
+ }
+ if in.UpdaterInterval != nil {
+ in, out := &in.UpdaterInterval, &out.UpdaterInterval
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.RecommenderInterval != nil {
+ in, out := &in.RecommenderInterval, &out.RecommenderInterval
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscaler.
+func (in *VerticalPodAutoscaler) DeepCopy() *VerticalPodAutoscaler {
+ if in == nil {
+ return nil
+ }
+ out := new(VerticalPodAutoscaler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Volume) DeepCopyInto(out *Volume) {
+ *out = *in
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ if in.Encrypted != nil {
+ in, out := &in.Encrypted, &out.Encrypted
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume.
+func (in *Volume) DeepCopy() *Volume {
+ if in == nil {
+ return nil
+ }
+ out := new(Volume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeType) DeepCopyInto(out *VolumeType) {
+ *out = *in
+ if in.Usable != nil {
+ in, out := &in.Usable, &out.Usable
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeType.
+func (in *VolumeType) DeepCopy() *VolumeType {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeType)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WatchCacheSizes) DeepCopyInto(out *WatchCacheSizes) {
+ *out = *in
+ if in.Default != nil {
+ in, out := &in.Default, &out.Default
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]ResourceWatchCacheSize, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchCacheSizes.
+func (in *WatchCacheSizes) DeepCopy() *WatchCacheSizes {
+ if in == nil {
+ return nil
+ }
+ out := new(WatchCacheSizes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Worker) DeepCopyInto(out *Worker) {
+ *out = *in
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.CABundle != nil {
+ in, out := &in.CABundle, &out.CABundle
+ *out = new(string)
+ **out = **in
+ }
+ if in.CRI != nil {
+ in, out := &in.CRI, &out.CRI
+ *out = new(CRI)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Kubernetes != nil {
+ in, out := &in.Kubernetes, &out.Kubernetes
+ *out = new(WorkerKubernetes)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ in.Machine.DeepCopyInto(&out.Machine)
+ if in.MaxSurge != nil {
+ in, out := &in.MaxSurge, &out.MaxSurge
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Taints != nil {
+ in, out := &in.Taints, &out.Taints
+ *out = make([]v1.Taint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Volume != nil {
+ in, out := &in.Volume, &out.Volume
+ *out = new(Volume)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DataVolumes != nil {
+ in, out := &in.DataVolumes, &out.DataVolumes
+ *out = make([]DataVolume, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.KubeletDataVolumeName != nil {
+ in, out := &in.KubeletDataVolumeName, &out.KubeletDataVolumeName
+ *out = new(string)
+ **out = **in
+ }
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SystemComponents != nil {
+ in, out := &in.SystemComponents, &out.SystemComponents
+ *out = new(WorkerSystemComponents)
+ **out = **in
+ }
+ if in.MachineControllerManagerSettings != nil {
+ in, out := &in.MachineControllerManagerSettings, &out.MachineControllerManagerSettings
+ *out = new(MachineControllerManagerSettings)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Worker.
+func (in *Worker) DeepCopy() *Worker {
+ if in == nil {
+ return nil
+ }
+ out := new(Worker)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkerKubernetes) DeepCopyInto(out *WorkerKubernetes) {
+ *out = *in
+ if in.Kubelet != nil {
+ in, out := &in.Kubelet, &out.Kubelet
+ *out = new(KubeletConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerKubernetes.
+func (in *WorkerKubernetes) DeepCopy() *WorkerKubernetes {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkerKubernetes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkerSystemComponents) DeepCopyInto(out *WorkerSystemComponents) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSystemComponents.
+func (in *WorkerSystemComponents) DeepCopy() *WorkerSystemComponents {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkerSystemComponents)
+ in.DeepCopyInto(out)
+ return out
+}
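
A minimal usage sketch (illustration only, not part of the vendored file) of what these generated helpers buy: DeepCopy allocates fresh slices and pointers, so mutating the copy never leaks back into the original object, which is what controllers rely on before modifying anything read from a shared informer cache. The import path comes from the diff header below; the field values are made up.

package main

import (
	"fmt"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
)

func main() {
	encrypted := true
	original := &gardencorev1alpha1.Worker{
		Zones:  []string{"zone-a"},
		Volume: &gardencorev1alpha1.Volume{Encrypted: &encrypted},
	}

	// The generated DeepCopy duplicates the Zones slice and the Volume pointer.
	clone := original.DeepCopy()
	clone.Zones[0] = "zone-b"
	*clone.Volume.Encrypted = false

	fmt.Println(original.Zones[0], *original.Volume.Encrypted) // still "zone-a true"
}
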
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.defaults.go
new file mode 100644
index 0000000..0c33538
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.defaults.go
@@ -0,0 +1,138 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulter functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ scheme.AddTypeDefaultingFunc(&CloudProfile{}, func(obj interface{}) { SetObjectDefaults_CloudProfile(obj.(*CloudProfile)) })
+ scheme.AddTypeDefaultingFunc(&CloudProfileList{}, func(obj interface{}) { SetObjectDefaults_CloudProfileList(obj.(*CloudProfileList)) })
+ scheme.AddTypeDefaultingFunc(&ControllerRegistration{}, func(obj interface{}) { SetObjectDefaults_ControllerRegistration(obj.(*ControllerRegistration)) })
+ scheme.AddTypeDefaultingFunc(&ControllerRegistrationList{}, func(obj interface{}) { SetObjectDefaults_ControllerRegistrationList(obj.(*ControllerRegistrationList)) })
+ scheme.AddTypeDefaultingFunc(&Project{}, func(obj interface{}) { SetObjectDefaults_Project(obj.(*Project)) })
+ scheme.AddTypeDefaultingFunc(&ProjectList{}, func(obj interface{}) { SetObjectDefaults_ProjectList(obj.(*ProjectList)) })
+ scheme.AddTypeDefaultingFunc(&SecretBinding{}, func(obj interface{}) { SetObjectDefaults_SecretBinding(obj.(*SecretBinding)) })
+ scheme.AddTypeDefaultingFunc(&SecretBindingList{}, func(obj interface{}) { SetObjectDefaults_SecretBindingList(obj.(*SecretBindingList)) })
+ scheme.AddTypeDefaultingFunc(&Seed{}, func(obj interface{}) { SetObjectDefaults_Seed(obj.(*Seed)) })
+ scheme.AddTypeDefaultingFunc(&SeedList{}, func(obj interface{}) { SetObjectDefaults_SeedList(obj.(*SeedList)) })
+ scheme.AddTypeDefaultingFunc(&Shoot{}, func(obj interface{}) { SetObjectDefaults_Shoot(obj.(*Shoot)) })
+ scheme.AddTypeDefaultingFunc(&ShootList{}, func(obj interface{}) { SetObjectDefaults_ShootList(obj.(*ShootList)) })
+ return nil
+}
+
+func SetObjectDefaults_CloudProfile(in *CloudProfile) {
+ for i := range in.Spec.MachineTypes {
+ a := &in.Spec.MachineTypes[i]
+ SetDefaults_MachineType(a)
+ }
+ for i := range in.Spec.VolumeTypes {
+ a := &in.Spec.VolumeTypes[i]
+ SetDefaults_VolumeType(a)
+ }
+}
+
+func SetObjectDefaults_CloudProfileList(in *CloudProfileList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_CloudProfile(a)
+ }
+}
+
+func SetObjectDefaults_ControllerRegistration(in *ControllerRegistration) {
+ for i := range in.Spec.Resources {
+ a := &in.Spec.Resources[i]
+ SetDefaults_ControllerResource(a)
+ }
+ if in.Spec.Deployment != nil {
+ SetDefaults_ControllerDeployment(in.Spec.Deployment)
+ }
+}
+
+func SetObjectDefaults_ControllerRegistrationList(in *ControllerRegistrationList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_ControllerRegistration(a)
+ }
+}
+
+func SetObjectDefaults_Project(in *Project) {
+ SetDefaults_Project(in)
+}
+
+func SetObjectDefaults_ProjectList(in *ProjectList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_Project(a)
+ }
+}
+
+func SetObjectDefaults_SecretBinding(in *SecretBinding) {
+ SetDefaults_SecretBinding(in)
+}
+
+func SetObjectDefaults_SecretBindingList(in *SecretBindingList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_SecretBinding(a)
+ }
+}
+
+func SetObjectDefaults_Seed(in *Seed) {
+ SetDefaults_Seed(in)
+}
+
+func SetObjectDefaults_SeedList(in *SeedList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_Seed(a)
+ }
+}
+
+func SetObjectDefaults_Shoot(in *Shoot) {
+ SetDefaults_Shoot(in)
+ if in.Spec.Addons != nil {
+ if in.Spec.Addons.NginxIngress != nil {
+ SetDefaults_NginxIngress(in.Spec.Addons.NginxIngress)
+ }
+ }
+ if in.Spec.Kubernetes.VerticalPodAutoscaler != nil {
+ SetDefaults_VerticalPodAutoscaler(in.Spec.Kubernetes.VerticalPodAutoscaler)
+ }
+ if in.Spec.Maintenance != nil {
+ SetDefaults_Maintenance(in.Spec.Maintenance)
+ }
+ for i := range in.Spec.Provider.Workers {
+ a := &in.Spec.Provider.Workers[i]
+ SetDefaults_Worker(a)
+ }
+}
+
+func SetObjectDefaults_ShootList(in *ShootList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_Shoot(a)
+ }
+}
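
A short sketch of how this generated registration is typically consumed (an illustration under assumptions, not part of the patch): RegisterDefaults is wired into a runtime.Scheme, and scheme.Default then dispatches to the matching SetObjectDefaults_* function by object type. The concrete defaulted value assumes that the v1alpha1 SetDefaults_MachineType mirrors the v1beta1 variant shown later in this patch.

package main

import (
	"fmt"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Register the generated type-defaulting functions shown above.
	if err := gardencorev1alpha1.RegisterDefaults(scheme); err != nil {
		panic(err)
	}

	profile := &gardencorev1alpha1.CloudProfile{}
	profile.Spec.MachineTypes = []gardencorev1alpha1.MachineType{{}}

	// Default looks up the registered function for *CloudProfile and runs
	// SetObjectDefaults_CloudProfile, which defaults every machine type.
	scheme.Default(profile)

	// Assumption: MachineType.Usable is defaulted to true, as in v1beta1.
	fmt.Println(*profile.Spec.MachineTypes[0].Usable)
}
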
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go
new file mode 100644
index 0000000..29645e7
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go
@@ -0,0 +1,330 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package constants
+
+const (
+ // SecretNameCACluster is a constant for the name of a Kubernetes secret object that contains the CA
+ // certificate of a shoot cluster.
+ SecretNameCACluster = "ca"
+ // SecretNameCAETCD is a constant for the name of a Kubernetes secret object that contains the CA
+ // certificate of the etcd of a shoot cluster.
+ SecretNameCAETCD = "ca-etcd"
+ // SecretNameCAFrontProxy is a constant for the name of a Kubernetes secret object that contains the CA
+ // certificate of the kube-aggregator of a shoot cluster.
+ SecretNameCAFrontProxy = "ca-front-proxy"
+ // SecretNameCAKubelet is a constant for the name of a Kubernetes secret object that contains the CA
+ // certificate of the kubelet of a shoot cluster.
+ SecretNameCAKubelet = "ca-kubelet"
+ // SecretNameCAMetricsServer is a constant for the name of a Kubernetes secret object that contains the CA
+ // certificate of the metrics-server of a shoot cluster.
+ SecretNameCAMetricsServer = "ca-metrics-server"
+ // SecretNameCloudProvider is a constant for the name of a Kubernetes secret object that contains the provider
+ // specific credentials that shall be used to create/delete the shoot.
+ SecretNameCloudProvider = "cloudprovider"
+ // SecretNameSSHKeyPair is a constant for the name of a Kubernetes secret object that contains the SSH key pair
+ // (public and private key) that can be used to SSH into the shoot nodes.
+ SecretNameSSHKeyPair = "ssh-keypair"
+ // SecretNameServiceAccountKey is a constant for the name of a Kubernetes secret object that contains a
+ // PEM-encoded private RSA or ECDSA key used by the Kube Controller Manager to sign service account tokens
+ SecretNameServiceAccountKey = "service-account-key"
+
+ // SecretNameGardener is a constant for the name of a Kubernetes secret object that contains the client
+ // certificate and a kubeconfig for a shoot cluster. It is used by Gardener and can be used by extension
+ // controllers in order to communicate with the shoot's API server. The client certificate has administrator
+ // privileges.
+ SecretNameGardener = "gardener"
+ // SecretNameGardenerInternal is a constant for the name of a Kubernetes secret object that contains the client
+ // certificate and a kubeconfig for a shoot cluster. It is used by Gardener and can be used by extension
+ // controllers in order to communicate with the shoot's API server. The client certificate has administrator
+ // privileges. The difference to the "gardener" secret is that it contains the in-cluster endpoint as the address
+ // of the shoot API server instead of the DNS name or load balancer address.
+ SecretNameGardenerInternal = "gardener-internal"
+
+ // DeploymentNameClusterAutoscaler is a constant for the name of a Kubernetes deployment object that contains
+ // the cluster-autoscaler pod.
+ DeploymentNameClusterAutoscaler = "cluster-autoscaler"
+ // DeploymentNameKubeAPIServer is a constant for the name of a Kubernetes deployment object that contains
+ // the kube-apiserver pod.
+ DeploymentNameKubeAPIServer = "kube-apiserver"
+ // DeploymentNameKubeControllerManager is a constant for the name of a Kubernetes deployment object that contains
+ // the kube-controller-manager pod.
+ DeploymentNameKubeControllerManager = "kube-controller-manager"
+
+ // DeploymentNameKubeScheduler is a constant for the name of a Kubernetes deployment object that contains
+ // the kube-scheduler pod.
+ DeploymentNameKubeScheduler = "kube-scheduler"
+ // DeploymentNameGardenerResourceManager is a constant for the name of a Kubernetes deployment object that contains
+ // the gardener-resource-manager pod.
+ DeploymentNameGardenerResourceManager = "gardener-resource-manager"
+ // DeploymentNameGrafanaOperators is a constant for the name of a Kubernetes deployment object that contains
+ // the grafana-operators pod.
+ DeploymentNameGrafanaOperators = "grafana-operators"
+ // DeploymentNameGrafanaUsers is a constant for the name of a Kubernetes deployment object that contains
+ // the grafana-users pod.
+ DeploymentNameGrafanaUsers = "grafana-users"
+ // DeploymentNameKubeStateMetricsShoot is a constant for the name of a Kubernetes deployment object that contains
+ // the kube-state-metrics pod.
+ DeploymentNameKubeStateMetricsShoot = "kube-state-metrics"
+
+ // DeploymentNameVPAAdmissionController is a constant for the name of the VPA admission controller deployment.
+ DeploymentNameVPAAdmissionController = "vpa-admission-controller"
+ // DeploymentNameVPAExporter is a constant for the name of the VPA exporter deployment.
+ DeploymentNameVPAExporter = "vpa-exporter"
+ // DeploymentNameVPARecommender is a constant for the name of the VPA recommender deployment.
+ DeploymentNameVPARecommender = "vpa-recommender"
+ // DeploymentNameVPAUpdater is a constant for the name of the VPA updater deployment.
+ DeploymentNameVPAUpdater = "vpa-updater"
+
+ // StatefulSetNameAlertManager is a constant for the name of a Kubernetes stateful set object that contains
+ // the alertmanager pod.
+ StatefulSetNameAlertManager = "alertmanager"
+ // ETCDRoleMain is a constant for the main etcd role.
+ ETCDRoleMain = "main"
+ // ETCDRoleEvents is a constant for the events etcd role.
+ ETCDRoleEvents = "events"
+ // ETCDMain is a constant for the name of etcd-main Etcd object.
+ ETCDMain = "etcd-" + ETCDRoleMain
+ // ETCDEvents is a constant for the name of etcd-events Etcd object.
+ ETCDEvents = "etcd-" + ETCDRoleEvents
+ // StatefulSetNameLoki is a constant for the name of a Kubernetes stateful set object that contains
+ // the loki pod.
+ StatefulSetNameLoki = "loki"
+ // StatefulSetNamePrometheus is a constant for the name of a Kubernetes stateful set object that contains
+ // the prometheus pod.
+ StatefulSetNamePrometheus = "prometheus"
+
+ // GardenerPurpose is a constant for the key in a label describing the purpose of the respective object.
+ GardenerPurpose = "gardener.cloud/purpose"
+ // GardenerDescription is a constant for a key in an annotation describing what the resource is used for.
+ GardenerDescription = "gardener.cloud/description"
+
+ // GardenerOperation is a constant for an annotation on a resource that describes a desired operation.
+ GardenerOperation = "gardener.cloud/operation"
+ // GardenerOperationReconcile is a constant for the value of the operation annotation describing a reconcile
+ // operation.
+ GardenerOperationReconcile = "reconcile"
+ // GardenerTimestamp is a constant for an annotation on a resource that describes the timestamp when a reconciliation has been requested.
+ // It is only used to guarantee an update event for watching clients in case the operation-annotation is already present.
+ GardenerTimestamp = "gardener.cloud/timestamp"
+ // GardenerOperationMigrate is a constant for the value of the operation annotation describing a migration
+ // operation.
+ GardenerOperationMigrate = "migrate"
+ // GardenerOperationRestore is a constant for the value of the operation annotation describing a restoration
+ // operation.
+ GardenerOperationRestore = "restore"
+ // GardenerOperationWaitForState is a constant for the value of the operation annotation describing a wait
+ // operation.
+ GardenerOperationWaitForState = "wait-for-state"
+
+ // DeprecatedGardenRole is the key for an annotation on a Kubernetes object indicating what it is used for.
+ //
+ // Deprecated: Use `GardenRole` instead.
+ DeprecatedGardenRole = "garden.sapcloud.io/role"
+ // GardenRole is a constant for a label that describes a role.
+ GardenRole = "gardener.cloud/role"
+ // GardenRoleExtension is a constant for a label that describes the 'extensions' role.
+ GardenRoleExtension = "extension"
+ // GardenRoleSeed is the value of the GardenRole key indicating type 'seed'.
+ GardenRoleSeed = "seed"
+ // GardenRoleShoot is the value of the GardenRole key indicating type 'shoot'.
+ GardenRoleShoot = "shoot"
+ // GardenRoleLogging is the value of the GardenRole key indicating type 'logging'.
+ GardenRoleLogging = "logging"
+ // GardenRoleProject is the value of GardenRole key indicating type 'project'.
+ GardenRoleProject = "project"
+ // GardenRoleControlPlane is the value of the GardenRole key indicating type 'controlplane'.
+ GardenRoleControlPlane = "controlplane"
+ // GardenRoleSystemComponent is the value of the GardenRole key indicating type 'system-component'.
+ GardenRoleSystemComponent = "system-component"
+ // GardenRoleMonitoring is the value of the GardenRole key indicating type 'monitoring'.
+ GardenRoleMonitoring = "monitoring"
+ // GardenRoleOptionalAddon is the value of the GardenRole key indicating type 'optional-addon'.
+ GardenRoleOptionalAddon = "optional-addon"
+ // GardenRoleKubeconfig is the value of the GardenRole key indicating type 'kubeconfig'.
+ GardenRoleKubeconfig = "kubeconfig"
+ // GardenRoleSSHKeyPair is the value of the GardenRole key indicating type 'ssh-keypair'.
+ GardenRoleSSHKeyPair = "ssh-keypair"
+
+ // DeprecatedShootUID is an annotation key for the shoot namespace in the seed cluster,
+ // whose value will be the value of `shoot.status.uid`.
+ //
+ // Deprecated: Use the `Cluster` resource or the `ShootUID` annotation key from the new API group instead.
+ DeprecatedShootUID = "shoot.garden.sapcloud.io/uid"
+ // ShootUID is an annotation key for the shoot namespace in the seed cluster,
+ // whose value will be the value of `shoot.status.uid`.
+ ShootUID = "shoot.gardener.cloud/uid"
+
+ // SeedResourceManagerClass is the resource-class managed by the Gardener-Resource-Manager
+ // instance in the garden namespace on the seeds.
+ SeedResourceManagerClass = "seed"
+ // LabelBackupProvider is used to identify the backup provider.
+ LabelBackupProvider = "backup.gardener.cloud/provider"
+ // LabelSeedProvider is used to identify the seed provider.
+ LabelSeedProvider = "seed.gardener.cloud/provider"
+ // LabelShootProvider is used to identify the shoot provider.
+ LabelShootProvider = "shoot.gardener.cloud/provider"
+ // LabelNetworkingProvider is used to identify the networking provider for the cni plugin.
+ LabelNetworkingProvider = "networking.shoot.gardener.cloud/provider"
+ // LabelExtensionConfiguration is used to identify the provider's configuration which will be added to Gardener configuration
+ LabelExtensionConfiguration = "extensions.gardener.cloud/configuration"
+ // LabelLogging is a constant for a label for logging stack configurations
+ LabelLogging = "logging"
+ // LabelMonitoring is a constant for a label for monitoring stack configurations
+ LabelMonitoring = "monitoring"
+
+ // LabelNetworkPolicyToBlockedCIDRs allows Egress from pods labeled with 'networking.gardener.cloud/to-blocked-cidrs=allowed'.
+ LabelNetworkPolicyToBlockedCIDRs = "networking.gardener.cloud/to-blocked-cidrs"
+ // LabelNetworkPolicyToDNS allows Egress from pods labeled with 'networking.gardener.cloud/to-dns=allowed' to DNS running in 'kube-system'.
+ // In practice, most of the Pods which require network Egress need this label.
+ LabelNetworkPolicyToDNS = "networking.gardener.cloud/to-dns"
+ // LabelNetworkPolicyToPrivateNetworks allows Egress from pods labeled with 'networking.gardener.cloud/to-private-networks=allowed' to the
+ // private networks (RFC1918) and carrier-grade NAT (RFC6598) ranges, except for the cloud provider's specific
+ // metadata service IP, the seed networks, and the shoot networks.
+ LabelNetworkPolicyToPrivateNetworks = "networking.gardener.cloud/to-private-networks"
+ // LabelNetworkPolicyToPublicNetworks allows Egress from pods labeled with 'networking.gardener.cloud/to-public-networks=allowed' to all public
+ // network IPs, except for private networks (RFC1918), carrier-grade NAT (RFC6598), and the cloud provider's specific metadata service IP.
+ // In practice, this blocks Egress traffic to all networks in the Seed cluster and allows only traffic to public IPv4 addresses.
+ LabelNetworkPolicyToPublicNetworks = "networking.gardener.cloud/to-public-networks"
+ // LabelNetworkPolicyToSeedAPIServer allows Egress from pods labeled with 'networking.gardener.cloud/to-seed-apiserver=allowed' to Seed's Kubernetes
+ // API Server.
+ LabelNetworkPolicyToSeedAPIServer = "networking.gardener.cloud/to-seed-apiserver"
+ // LabelNetworkPolicyToShootAPIServer allows Egress from pods labeled with 'networking.gardener.cloud/to-shoot-apiserver=allowed' to talk to Shoot's
+ // Kubernetes API Server.
+ LabelNetworkPolicyToShootAPIServer = "networking.gardener.cloud/to-shoot-apiserver"
+ // LabelNetworkPolicyFromShootAPIServer allows Egress from Shoot's Kubernetes API Server to talk to pods labeled with
+ // 'networking.gardener.cloud/from-shoot-apiserver=allowed'.
+ LabelNetworkPolicyFromShootAPIServer = "networking.gardener.cloud/from-shoot-apiserver"
+ // LabelNetworkPolicyToAll disables all Ingress and Egress traffic into/from this namespace when set to "disallowed".
+ LabelNetworkPolicyToAll = "networking.gardener.cloud/to-all"
+ // LabelNetworkPolicyFromPrometheus allows Ingress from Prometheus to pods labeled with 'networking.gardener.cloud/from-prometheus=allowed' and ports
+ // named 'metrics' in the PodSpecification.
+ LabelNetworkPolicyFromPrometheus = "networking.gardener.cloud/from-prometheus"
+ // LabelNetworkPolicyShootFromSeed allows Ingress traffic from the seed cluster (where the shoot's kube-apiserver
+ // runs).
+ LabelNetworkPolicyShootFromSeed = "networking.gardener.cloud/from-seed"
+ // LabelNetworkPolicyShootToAPIServer allows Egress traffic to the shoot's API server.
+ LabelNetworkPolicyShootToAPIServer = "networking.gardener.cloud/to-apiserver"
+ // LabelNetworkPolicyShootToKubelet allows Egress traffic to the kubelets.
+ LabelNetworkPolicyShootToKubelet = "networking.gardener.cloud/to-kubelet"
+ // LabelNetworkPolicyAllowed is a constant for allowing a network policy.
+ LabelNetworkPolicyAllowed = "allowed"
+ // LabelNetworkPolicyDisallowed is a constant for disallowing a network policy.
+ LabelNetworkPolicyDisallowed = "disallowed"
+
+ // LabelApp is a constant for a label key.
+ LabelApp = "app"
+ // LabelRole is a constant for a label key.
+ LabelRole = "role"
+ // LabelKubernetes is a constant for a label for Kubernetes workload.
+ LabelKubernetes = "kubernetes"
+ // LabelAPIServer is a constant for a label for the kube-apiserver.
+ LabelAPIServer = "apiserver"
+ // LabelControllerManager is a constant for a label for the kube-controller-manager.
+ LabelControllerManager = "controller-manager"
+ // LabelScheduler is a constant for a label for the kube-scheduler.
+ LabelScheduler = "scheduler"
+ // LabelExtensionProjectRole is a constant for a label value for extension project roles
+ LabelExtensionProjectRole = "extension-project-role"
+
+ // LabelAPIServerExposure is a constant for label key which gardener can add to various objects related
+ // to kube-apiserver exposure.
+ LabelAPIServerExposure = "core.gardener.cloud/apiserver-exposure"
+ // LabelAPIServerExposureGardenerManaged is a constant for label value which gardener sets on the label key
+ // "core.gardener.cloud/apiserver-exposure" to indicate that it's responsible for apiserver exposure (via SNI).
+ LabelAPIServerExposureGardenerManaged = "gardener-managed"
+
+ // GardenNamespace is the namespace in which the configuration and secrets for
+ // the Gardener controller manager will be stored (e.g., secrets for the Seed clusters).
+ // It is also used by the gardener-apiserver.
+ GardenNamespace = "garden"
+
+ // AnnotationShootUseAsSeed is a constant for an annotation on a Shoot resource indicating that the Shoot shall be registered as Seed in the
+ // Garden cluster once successfully created.
+ AnnotationShootUseAsSeed = "shoot.gardener.cloud/use-as-seed"
+ // AnnotationShootIgnoreAlerts is the key for an annotation of a Shoot cluster whose value indicates
+ // if alerts for this cluster should be ignored
+ AnnotationShootIgnoreAlerts = "shoot.gardener.cloud/ignore-alerts"
+ // AnnotationShootSkipCleanup is a key for an annotation on a Shoot resource that declares that the clean up steps should be skipped when the
+ // cluster is deleted. Concretely, this will skip everything except the deletion of (load balancer) services and persistent volume resources.
+ AnnotationShootSkipCleanup = "shoot.gardener.cloud/skip-cleanup"
+ // AnnotationShootKonnectivityTunnel is the key for an annotation of a Shoot cluster whose value indicates
+ // if a konnectivity-tunnel should be deployed into the shoot cluster or not.
+ AnnotationShootKonnectivityTunnel = "alpha.featuregates.shoot.gardener.cloud/konnectivity-tunnel"
+
+ // AnnotationShootAPIServerSNIPodInjector is the key for an annotation of a Shoot cluster whose value indicates
+ // if pod injection of 'KUBERNETES_SERVICE_HOST' environment variable should happen for clusters where APIServerSNI
+ // featuregate is enabled.
+ // Any value other than 'disable' enables this feature.
+ AnnotationShootAPIServerSNIPodInjector = "alpha.featuregates.shoot.gardener.cloud/apiserver-sni-pod-injector"
+ // AnnotationShootAPIServerSNIPodInjectorDisableValue is the value of the
+ // `alpha.featuregates.shoot.gardener.cloud/apiserver-sni-pod-injector` annotation that disables the pod injection.
+ AnnotationShootAPIServerSNIPodInjectorDisableValue = "disable"
+
+ // OperatingSystemConfigUnitNameKubeletService is a constant for a unit in the operating system config that contains the kubelet service.
+ OperatingSystemConfigUnitNameKubeletService = "kubelet.service"
+ // OperatingSystemConfigUnitNameDockerService is a constant for a unit in the operating system config that contains the docker service.
+ OperatingSystemConfigUnitNameDockerService = "docker.service"
+ // OperatingSystemConfigUnitNameContainerDService is a constant for a unit in the operating system config that contains the containerd service.
+ OperatingSystemConfigUnitNameContainerDService = "containerd.service"
+ // OperatingSystemConfigFilePathKernelSettings is a constant for a path to a file in the operating system config that contains some general kernel settings.
+ OperatingSystemConfigFilePathKernelSettings = "/etc/sysctl.d/99-k8s-general.conf"
+ // OperatingSystemConfigFilePathKubeletConfig is a constant for a path to a file in the operating system config that contains the kubelet configuration.
+ OperatingSystemConfigFilePathKubeletConfig = "/var/lib/kubelet/config/kubelet"
+
+ // FluentBitConfigMapKubernetesFilter is a constant for the Fluent Bit ConfigMap's section regarding Kubernetes filters
+ FluentBitConfigMapKubernetesFilter = "filter-kubernetes.conf"
+ // FluentBitConfigMapParser is a constant for the Fluent Bit ConfigMap's section regarding Parsers for common container types
+ FluentBitConfigMapParser = "parsers.conf"
+ // PrometheusConfigMapAlertingRules is a constant for the Prometheus alerting rules tag in provider-specific monitoring configuration
+ PrometheusConfigMapAlertingRules = "alerting_rules"
+ // PrometheusConfigMapScrapeConfig is a constant for the Prometheus scrape config tag in provider-specific monitoring configuration
+ PrometheusConfigMapScrapeConfig = "scrape_config"
+ // GrafanaConfigMapUserDashboard is a constant for the Grafana user dashboard tag in provider-specific monitoring configuration
+ GrafanaConfigMapUserDashboard = "dashboard_users"
+ // GrafanaConfigMapOperatorDashboard is a constant for the Grafana operator dashboard tag in provider-specific monitoring configuration
+ GrafanaConfigMapOperatorDashboard = "dashboard_operators"
+
+ // LabelControllerRegistrationName is the key of a label on extension namespaces that indicates the controller registration name.
+ LabelControllerRegistrationName = "controllerregistration.core.gardener.cloud/name"
+ // LabelPodMaintenanceRestart is a constant for a label that describes that a pod should be restarted during maintenance.
+ LabelPodMaintenanceRestart = "maintenance.gardener.cloud/restart"
+ // LabelWorkerPool is a constant for a label that indicates the worker pool the node belongs to
+ LabelWorkerPool = "worker.gardener.cloud/pool"
+ // LabelWorkerPoolDeprecated is a deprecated constant for a label that indicates the worker pool the node belongs to
+ LabelWorkerPoolDeprecated = "worker.garden.sapcloud.io/group"
+ // LabelWorkerPoolSystemComponents is a constant that indicates whether the worker pool should host system components
+ LabelWorkerPoolSystemComponents = "worker.gardener.cloud/system-components"
+
+ // EventResourceReferenced indicates that the resource deletion is in waiting mode because the resource is still
+ // being referenced by at least one other resource (e.g. a SecretBinding is still referenced by a Shoot)
+ EventResourceReferenced = "ResourceReferenced"
+
+ // PriorityClassNameShootControlPlane is the name of a priority class for critical pods of a shoot control plane.
+ PriorityClassNameShootControlPlane = "gardener-shoot-controlplane"
+
+ // ReferencedResourcesPrefix is the prefix used when copying referenced resources to the Shoot namespace in the Seed,
+ // to avoid naming collisions with resources managed by Gardener.
+ ReferencedResourcesPrefix = "ref-"
+
+ // ClusterIdentity is a constant equal to the name and data key (that stores the identity) of the cluster-identity ConfigMap
+ ClusterIdentity = "cluster-identity"
+
+ // SeedNginxIngressClass defines the ingress class for the seed nginx ingress controller
+ SeedNginxIngressClass = "nginx-gardener"
+ // IngressKindNginx defines nginx as the kind of managed Seed ingress
+ IngressKindNginx = "nginx"
+ // ShootNginxIngressClass defines the ingress class for the shoot nginx ingress controller
+ ShootNginxIngressClass = "nginx"
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/utils.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/utils.go
new file mode 100644
index 0000000..dadb1ce
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/utils.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package constants
+
+// GetShootVPADeploymentNames returns the names of all VPA-related deployments for shoot clusters.
+func GetShootVPADeploymentNames() []string {
+ return []string{
+ DeploymentNameVPAAdmissionController,
+ DeploymentNameVPARecommender,
+ DeploymentNameVPAUpdater,
+ }
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/conversions.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/conversions.go
new file mode 100644
index 0000000..749ed91
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/conversions.go
@@ -0,0 +1,203 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ "fmt"
+
+ "github.com/gardener/gardener/pkg/apis/core"
+
+ "k8s.io/apimachinery/pkg/conversion"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func addConversionFuncs(scheme *runtime.Scheme) error {
+ if err := scheme.AddFieldLabelConversionFunc(
+ SchemeGroupVersion.WithKind("BackupBucket"),
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name", "metadata.namespace", core.BackupBucketSeedName:
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ },
+ ); err != nil {
+ return err
+ }
+
+ if err := scheme.AddFieldLabelConversionFunc(
+ SchemeGroupVersion.WithKind("BackupEntry"),
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name", "metadata.namespace", core.BackupEntrySeedName:
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ },
+ ); err != nil {
+ return err
+ }
+
+ if err := scheme.AddFieldLabelConversionFunc(
+ SchemeGroupVersion.WithKind("ControllerInstallation"),
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name", core.RegistrationRefName, core.SeedRefName:
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ },
+ ); err != nil {
+ return err
+ }
+
+ if err := scheme.AddFieldLabelConversionFunc(
+ SchemeGroupVersion.WithKind("Shoot"),
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name", "metadata.namespace", core.ShootSeedName, core.ShootCloudProfileName, core.ShootStatusSeedName:
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ },
+ ); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func Convert_v1beta1_ProjectSpec_To_core_ProjectSpec(in *ProjectSpec, out *core.ProjectSpec, s conversion.Scope) error {
+ if err := autoConvert_v1beta1_ProjectSpec_To_core_ProjectSpec(in, out, s); err != nil {
+ return err
+ }
+
+ if owner := out.Owner; owner != nil {
+ outer:
+ for i, member := range out.Members {
+ if member.Name == owner.Name && member.APIGroup == owner.APIGroup && member.Kind == owner.Kind {
+ // add owner role to the current project's owner if not present
+ for _, role := range member.Roles {
+ if role == core.ProjectMemberOwner {
+ continue outer
+ }
+ }
+
+ out.Members[i].Roles = append(out.Members[i].Roles, core.ProjectMemberOwner)
+ } else {
+ // delete owner role from all other members
+ out.Members[i].Roles = removeRoleFromRoles(member.Roles, ProjectMemberOwner)
+ }
+ }
+ }
+
+ return nil
+}
+
+func Convert_core_ProjectSpec_To_v1beta1_ProjectSpec(in *core.ProjectSpec, out *ProjectSpec, s conversion.Scope) error {
+ if err := autoConvert_core_ProjectSpec_To_v1beta1_ProjectSpec(in, out, s); err != nil {
+ return err
+ }
+
+ if owner := out.Owner; owner != nil {
+ outer:
+ for i, member := range out.Members {
+ if member.Name == owner.Name && member.APIGroup == owner.APIGroup && member.Kind == owner.Kind {
+ // add owner role to the current project's owner if not present
+ if member.Role == core.ProjectMemberOwner {
+ // remove it from owners list if present
+ out.Members[i].Roles = removeRoleFromRoles(member.Roles, ProjectMemberOwner)
+ continue outer
+ }
+ for _, role := range member.Roles {
+ if role == ProjectMemberOwner {
+ continue outer
+ }
+ }
+
+ if out.Members[i].Role == "" {
+ out.Members[i].Role = core.ProjectMemberOwner
+ } else {
+ out.Members[i].Roles = append(out.Members[i].Roles, core.ProjectMemberOwner)
+ }
+ } else {
+ // delete owner role from all other members
+ out.Members[i].Roles = removeRoleFromRoles(member.Roles, ProjectMemberOwner)
+
+ if member.Role == ProjectMemberOwner {
+ if len(out.Members[i].Roles) == 0 {
+ out.Members[i].Role = ""
+ } else {
+ out.Members[i].Role = out.Members[i].Roles[0]
+ if len(out.Members[i].Roles) > 1 {
+ out.Members[i].Roles = out.Members[i].Roles[1:]
+ } else {
+ out.Members[i].Roles = nil
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func Convert_v1beta1_ProjectMember_To_core_ProjectMember(in *ProjectMember, out *core.ProjectMember, s conversion.Scope) error {
+ if err := autoConvert_v1beta1_ProjectMember_To_core_ProjectMember(in, out, s); err != nil {
+ return err
+ }
+
+ if len(in.Role) == 0 {
+ return nil
+ }
+
+ // delete in.Role from out.Roles to make sure it gets added to the head
+ if len(out.Roles) > 0 {
+ out.Roles = removeRoleFromRoles(out.Roles, in.Role)
+ }
+
+ // add in.Role to the head of out.Roles
+ out.Roles = append([]string{in.Role}, out.Roles...)
+
+ return nil
+}
+
+func Convert_core_ProjectMember_To_v1beta1_ProjectMember(in *core.ProjectMember, out *ProjectMember, s conversion.Scope) error {
+ if err := autoConvert_core_ProjectMember_To_v1beta1_ProjectMember(in, out, s); err != nil {
+ return err
+ }
+
+ if len(in.Roles) > 0 {
+ out.Role = in.Roles[0]
+ out.Roles = in.Roles[1:]
+ }
+
+ return nil
+}
+
+func removeRoleFromRoles(roles []string, role string) []string {
+ var newRoles []string
+ for _, r := range roles {
+ if r != role {
+ newRoles = append(newRoles, r)
+ }
+ }
+ return newRoles
+}
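
Two quick illustrations of what the conversion code above provides (sketches under assumptions, not part of the patch). First, the ProjectMember helpers normalize the legacy singular Role into the Roles list: a v1beta1 member with Role "admin" and Roles ["viewer", "admin"] converts to core Roles ["admin", "viewer"] (the duplicate is dropped and the singular role moves to the head), and converting back yields Role "admin" with Roles ["viewer"]. Second, the field label functions registered by addConversionFuncs back field selectors; assuming the package's standard AddToScheme (defined in register.go, not in this hunk) wires them in, they can be exercised through the scheme:

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Assumption: AddToScheme registers addConversionFuncs alongside the types.
	if err := gardencorev1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	gvk := gardencorev1beta1.SchemeGroupVersion.WithKind("Shoot")

	// "metadata.name" is whitelisted by the Shoot field label conversion above.
	label, value, err := scheme.ConvertFieldLabel(gvk, "metadata.name", "my-shoot")
	fmt.Println(label, value, err) // metadata.name my-shoot <nil>

	// Anything else is rejected with "field label not supported".
	_, _, err = scheme.ConvertFieldLabel(gvk, "spec.region", "eu-west-1")
	fmt.Println(err)
}
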
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go
new file mode 100644
index 0000000..fc30a75
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go
@@ -0,0 +1,361 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ "math"
+ "time"
+
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ "github.com/gardener/gardener/pkg/utils"
+ versionutils "github.com/gardener/gardener/pkg/utils/version"
+
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/utils/pointer"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+ return RegisterDefaults(scheme)
+}
+
+// SetDefaults_SecretBinding sets default values for SecretBinding objects.
+func SetDefaults_SecretBinding(obj *SecretBinding) {
+ if len(obj.SecretRef.Namespace) == 0 {
+ obj.SecretRef.Namespace = obj.Namespace
+ }
+
+ for i, quota := range obj.Quotas {
+ if len(quota.Namespace) == 0 {
+ obj.Quotas[i].Namespace = obj.Namespace
+ }
+ }
+}
+
+// SetDefaults_Project sets default values for Project objects.
+func SetDefaults_Project(obj *Project) {
+ defaultSubject(obj.Spec.Owner)
+
+ for i, member := range obj.Spec.Members {
+ defaultSubject(&obj.Spec.Members[i].Subject)
+
+ if len(member.Role) == 0 && len(member.Roles) == 0 {
+ obj.Spec.Members[i].Role = ProjectMemberViewer
+ }
+ }
+
+ if obj.Spec.Namespace != nil && *obj.Spec.Namespace == v1beta1constants.GardenNamespace {
+ if obj.Spec.Tolerations == nil {
+ obj.Spec.Tolerations = &ProjectTolerations{}
+ }
+ addTolerations(&obj.Spec.Tolerations.Whitelist, Toleration{Key: SeedTaintProtected})
+ addTolerations(&obj.Spec.Tolerations.Defaults, Toleration{Key: SeedTaintProtected})
+ }
+}
+
+func defaultSubject(obj *rbacv1.Subject) {
+ if obj != nil && len(obj.APIGroup) == 0 {
+ switch obj.Kind {
+ case rbacv1.ServiceAccountKind:
+ obj.APIGroup = ""
+ case rbacv1.UserKind:
+ obj.APIGroup = rbacv1.GroupName
+ case rbacv1.GroupKind:
+ obj.APIGroup = rbacv1.GroupName
+ }
+ }
+}
+
+// SetDefaults_MachineType sets default values for MachineType objects.
+func SetDefaults_MachineType(obj *MachineType) {
+ if obj.Usable == nil {
+ trueVar := true
+ obj.Usable = &trueVar
+ }
+}
+
+// SetDefaults_VolumeType sets default values for VolumeType objects.
+func SetDefaults_VolumeType(obj *VolumeType) {
+ if obj.Usable == nil {
+ trueVar := true
+ obj.Usable = &trueVar
+ }
+}
+
+// SetDefaults_Seed sets default values for Seed objects.
+func SetDefaults_Seed(obj *Seed) {
+ if obj.Spec.Settings == nil {
+ obj.Spec.Settings = &SeedSettings{}
+ }
+
+ if obj.Spec.Settings.ExcessCapacityReservation == nil {
+ obj.Spec.Settings.ExcessCapacityReservation = &SeedSettingExcessCapacityReservation{Enabled: true}
+ }
+
+ if obj.Spec.Settings.Scheduling == nil {
+ obj.Spec.Settings.Scheduling = &SeedSettingScheduling{Visible: true}
+ }
+
+ if obj.Spec.Settings.ShootDNS == nil {
+ obj.Spec.Settings.ShootDNS = &SeedSettingShootDNS{Enabled: true}
+ }
+
+ if obj.Spec.Settings.VerticalPodAutoscaler == nil {
+ obj.Spec.Settings.VerticalPodAutoscaler = &SeedSettingVerticalPodAutoscaler{Enabled: true}
+ }
+}
+
+// SetDefaults_Shoot sets default values for Shoot objects.
+func SetDefaults_Shoot(obj *Shoot) {
+ k8sVersionLessThan116, _ := versionutils.CompareVersions(obj.Spec.Kubernetes.Version, "<", "1.16")
+ // Error is ignored here because we cannot do anything meaningful with it.
+ // k8sVersionLessThan116 will default to `false`.
+
+ if obj.Spec.Kubernetes.AllowPrivilegedContainers == nil {
+ obj.Spec.Kubernetes.AllowPrivilegedContainers = pointer.BoolPtr(true)
+ }
+
+ if obj.Spec.Kubernetes.KubeAPIServer == nil {
+ obj.Spec.Kubernetes.KubeAPIServer = &KubeAPIServerConfig{}
+ }
+ if obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication == nil {
+ if k8sVersionLessThan116 {
+ obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication = pointer.BoolPtr(true)
+ } else {
+ obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication = pointer.BoolPtr(false)
+ }
+ }
+ if obj.Spec.Kubernetes.KubeAPIServer.Requests == nil {
+ obj.Spec.Kubernetes.KubeAPIServer.Requests = &KubeAPIServerRequests{}
+ }
+ if obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxNonMutatingInflight == nil {
+ obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxNonMutatingInflight = pointer.Int32Ptr(400)
+ }
+ if obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxMutatingInflight == nil {
+ obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxMutatingInflight = pointer.Int32Ptr(200)
+ }
+
+ if obj.Spec.Kubernetes.KubeControllerManager == nil {
+ obj.Spec.Kubernetes.KubeControllerManager = &KubeControllerManagerConfig{}
+ }
+ if obj.Spec.Kubernetes.KubeControllerManager.NodeCIDRMaskSize == nil {
+ obj.Spec.Kubernetes.KubeControllerManager.NodeCIDRMaskSize = calculateDefaultNodeCIDRMaskSize(obj.Spec.Kubernetes.Kubelet, obj.Spec.Provider.Workers)
+ }
+ if obj.Spec.Kubernetes.KubeControllerManager.PodEvictionTimeout == nil {
+ obj.Spec.Kubernetes.KubeControllerManager.PodEvictionTimeout = &metav1.Duration{Duration: 2 * time.Minute}
+ }
+
+ if obj.Spec.Kubernetes.KubeProxy == nil {
+ obj.Spec.Kubernetes.KubeProxy = &KubeProxyConfig{}
+ }
+ if obj.Spec.Kubernetes.KubeProxy.Mode == nil {
+ defaultProxyMode := ProxyModeIPTables
+ obj.Spec.Kubernetes.KubeProxy.Mode = &defaultProxyMode
+ }
+
+ if obj.Spec.Addons == nil {
+ obj.Spec.Addons = &Addons{}
+ }
+ if obj.Spec.Addons.KubernetesDashboard == nil {
+ obj.Spec.Addons.KubernetesDashboard = &KubernetesDashboard{}
+ }
+ if obj.Spec.Addons.KubernetesDashboard.AuthenticationMode == nil {
+ var defaultAuthMode string
+ if *obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication {
+ defaultAuthMode = KubernetesDashboardAuthModeBasic
+ } else {
+ defaultAuthMode = KubernetesDashboardAuthModeToken
+ }
+ obj.Spec.Addons.KubernetesDashboard.AuthenticationMode = &defaultAuthMode
+ }
+
+ if obj.Spec.Purpose == nil {
+ p := ShootPurposeEvaluation
+ obj.Spec.Purpose = &p
+ }
+
+ // In previous Gardener versions that did not support tolerations, it was hard-coded to (only) allow shoots in the
+ // `garden` namespace to use seeds that had the 'protected' taint. In order to be backwards compatible, now with the
+ // introduction of tolerations, we add the 'protected' toleration to the garden namespace by default.
+ if obj.Namespace == v1beta1constants.GardenNamespace {
+ addTolerations(&obj.Spec.Tolerations, Toleration{Key: SeedTaintProtected})
+ }
+
+ if obj.Spec.Kubernetes.Kubelet == nil {
+ obj.Spec.Kubernetes.Kubelet = &KubeletConfig{}
+ }
+ if obj.Spec.Kubernetes.Kubelet.FailSwapOn == nil {
+ obj.Spec.Kubernetes.Kubelet.FailSwapOn = pointer.BoolPtr(true)
+ }
+
+ var (
+ kubeReservedMemory = resource.MustParse("1Gi")
+ kubeReservedCPU = resource.MustParse("80m")
+ kubeReservedPID = resource.MustParse("20k")
+
+ k8sVersionGreaterEqual115, _ = versionutils.CompareVersions(obj.Spec.Kubernetes.Version, ">=", "1.15")
+ )
+
+ if obj.Spec.Kubernetes.Kubelet.KubeReserved == nil {
+ obj.Spec.Kubernetes.Kubelet.KubeReserved = &KubeletConfigReserved{Memory: &kubeReservedMemory, CPU: &kubeReservedCPU}
+
+ if k8sVersionGreaterEqual115 {
+ obj.Spec.Kubernetes.Kubelet.KubeReserved.PID = &kubeReservedPID
+ }
+ } else {
+ if obj.Spec.Kubernetes.Kubelet.KubeReserved.Memory == nil {
+ obj.Spec.Kubernetes.Kubelet.KubeReserved.Memory = &kubeReservedMemory
+ }
+ if obj.Spec.Kubernetes.Kubelet.KubeReserved.CPU == nil {
+ obj.Spec.Kubernetes.Kubelet.KubeReserved.CPU = &kubeReservedCPU
+ }
+ if obj.Spec.Kubernetes.Kubelet.KubeReserved.PID == nil && k8sVersionGreaterEqual115 {
+ obj.Spec.Kubernetes.Kubelet.KubeReserved.PID = &kubeReservedPID
+ }
+ }
+
+ if obj.Spec.Maintenance == nil {
+ obj.Spec.Maintenance = &Maintenance{}
+ }
+}
+
+// SetDefaults_Maintenance sets default values for Maintenance objects.
+func SetDefaults_Maintenance(obj *Maintenance) {
+ if obj.AutoUpdate == nil {
+ obj.AutoUpdate = &MaintenanceAutoUpdate{
+ KubernetesVersion: true,
+ MachineImageVersion: true,
+ }
+ }
+
+ if obj.TimeWindow == nil {
+ mt := utils.RandomMaintenanceTimeWindow()
+ obj.TimeWindow = &MaintenanceTimeWindow{
+ Begin: mt.Begin().Formatted(),
+ End: mt.End().Formatted(),
+ }
+ }
+}
+
+// SetDefaults_VerticalPodAutoscaler sets default values for VerticalPodAutoscaler objects.
+func SetDefaults_VerticalPodAutoscaler(obj *VerticalPodAutoscaler) {
+ if obj.EvictAfterOOMThreshold == nil {
+ v := DefaultEvictAfterOOMThreshold
+ obj.EvictAfterOOMThreshold = &v
+ }
+ if obj.EvictionRateBurst == nil {
+ v := DefaultEvictionRateBurst
+ obj.EvictionRateBurst = &v
+ }
+ if obj.EvictionRateLimit == nil {
+ v := DefaultEvictionRateLimit
+ obj.EvictionRateLimit = &v
+ }
+ if obj.EvictionTolerance == nil {
+ v := DefaultEvictionTolerance
+ obj.EvictionTolerance = &v
+ }
+ if obj.RecommendationMarginFraction == nil {
+ v := DefaultRecommendationMarginFraction
+ obj.RecommendationMarginFraction = &v
+ }
+ if obj.UpdaterInterval == nil {
+ v := DefaultUpdaterInterval
+ obj.UpdaterInterval = &v
+ }
+ if obj.RecommenderInterval == nil {
+ v := DefaultRecommenderInterval
+ obj.RecommenderInterval = &v
+ }
+}
+
+// SetDefaults_Worker sets default values for Worker objects.
+func SetDefaults_Worker(obj *Worker) {
+ if obj.MaxSurge == nil {
+ obj.MaxSurge = &DefaultWorkerMaxSurge
+ }
+ if obj.MaxUnavailable == nil {
+ obj.MaxUnavailable = &DefaultWorkerMaxUnavailable
+ }
+ if obj.SystemComponents == nil {
+ obj.SystemComponents = &WorkerSystemComponents{
+ Allow: DefaultWorkerSystemComponentsAllow,
+ }
+ }
+}
+
+// SetDefaults_NginxIngress sets default values for NginxIngress objects.
+func SetDefaults_NginxIngress(obj *NginxIngress) {
+ if obj.ExternalTrafficPolicy == nil {
+ v := corev1.ServiceExternalTrafficPolicyTypeCluster
+ obj.ExternalTrafficPolicy = &v
+ }
+}
+
+// SetDefaults_ControllerResource sets default values for ControllerResource objects.
+func SetDefaults_ControllerResource(obj *ControllerResource) {
+ if obj.Primary == nil {
+ obj.Primary = pointer.BoolPtr(true)
+ }
+}
+
+// SetDefaults_ControllerDeployment sets default values for ControllerDeployment objects.
+func SetDefaults_ControllerDeployment(obj *ControllerDeployment) {
+ p := ControllerDeploymentPolicyOnDemand
+ if obj.Policy == nil {
+ obj.Policy = &p
+ }
+}
+
+// Helper functions
+
+func calculateDefaultNodeCIDRMaskSize(kubelet *KubeletConfig, workers []Worker) *int32 {
+ var maxPods int32 = 110 // default maxPods setting on kubelet
+
+ if kubelet != nil && kubelet.MaxPods != nil {
+ maxPods = *kubelet.MaxPods
+ }
+
+ for _, worker := range workers {
+ if worker.Kubernetes != nil && worker.Kubernetes.Kubelet != nil && worker.Kubernetes.Kubelet.MaxPods != nil && *worker.Kubernetes.Kubelet.MaxPods > maxPods {
+ maxPods = *worker.Kubernetes.Kubelet.MaxPods
+ }
+ }
+
+ // by having approximately twice as many available IP addresses as possible Pods, Kubernetes is able to mitigate IP address reuse as Pods are added to and removed from a node.
+ nodeCidrRange := int32(32 - int(math.Ceil(math.Log2(float64(maxPods*2)))))
+ return &nodeCidrRange
+}
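
Worked example of the formula above: with the default kubelet setting of maxPods = 110, the target is 2 * 110 = 220 addresses per node, ceil(log2(220)) = 8, so the default node CIDR mask size is 32 - 8 = 24 (a /24, i.e. 256 addresses per node). A worker pool that raises maxPods to 250 would need ceil(log2(500)) = 9 bits and therefore get a /23 per node.
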
+
+func addTolerations(tolerations *[]Toleration, additionalTolerations ...Toleration) {
+ existingTolerations := sets.NewString()
+ for _, toleration := range *tolerations {
+ existingTolerations.Insert(utils.IDForKeyWithOptionalValue(toleration.Key, toleration.Value))
+ }
+
+ for _, toleration := range additionalTolerations {
+ if existingTolerations.Has(toleration.Key) {
+ continue
+ }
+ if existingTolerations.Has(utils.IDForKeyWithOptionalValue(toleration.Key, toleration.Value)) {
+ continue
+ }
+ *tolerations = append(*tolerations, toleration)
+ }
+}
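
A small sketch of the Shoot defaulting above in isolation (illustration only; the printed values follow from the code in this file, with "evaluation" assumed as the string value of ShootPurposeEvaluation):

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
)

func main() {
	shoot := &gardencorev1beta1.Shoot{}
	shoot.Spec.Kubernetes.Version = "1.18.2"

	gardencorev1beta1.SetDefaults_Shoot(shoot)

	fmt.Println(*shoot.Spec.Purpose)                                            // evaluation
	fmt.Println(*shoot.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication) // false, since 1.18.2 >= 1.16
	fmt.Println(*shoot.Spec.Kubernetes.KubeControllerManager.NodeCIDRMaskSize)  // 24, from the default maxPods of 110
}
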
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/doc.go
new file mode 100644
index 0000000..7b83248
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/doc.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v1beta1 is the v1beta1 version of the API.
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/gardener/gardener/pkg/apis/core
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:protobuf-gen=package
+
+//go:generate gen-crd-api-reference-docs -api-dir . -config ../../../../hack/api-reference/core-config.json -template-dir ../../../../hack/api-reference/template -out-file ../../../../hack/api-reference/core.md
+
+// Package v1beta1 is a version of the API.
+// +groupName=core.gardener.cloud
+package v1beta1
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.pb.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.pb.go
new file mode 100644
index 0000000..8f09d53
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.pb.go
@@ -0,0 +1,39570 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto
+
+package v1beta1
+
+import (
+ encoding_binary "encoding/binary"
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ k8s_io_api_core_v1 "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
+ v13 "k8s.io/api/rbac/v1"
+ resource "k8s.io/apimachinery/pkg/api/resource"
+ v11 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+
+ k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *Addon) Reset() { *m = Addon{} }
+func (*Addon) ProtoMessage() {}
+func (*Addon) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{0}
+}
+func (m *Addon) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Addon) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Addon) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Addon.Merge(m, src)
+}
+func (m *Addon) XXX_Size() int {
+ return m.Size()
+}
+func (m *Addon) XXX_DiscardUnknown() {
+ xxx_messageInfo_Addon.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Addon proto.InternalMessageInfo
+
+func (m *Addons) Reset() { *m = Addons{} }
+func (*Addons) ProtoMessage() {}
+func (*Addons) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{1}
+}
+func (m *Addons) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Addons) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Addons) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Addons.Merge(m, src)
+}
+func (m *Addons) XXX_Size() int {
+ return m.Size()
+}
+func (m *Addons) XXX_DiscardUnknown() {
+ xxx_messageInfo_Addons.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Addons proto.InternalMessageInfo
+
+func (m *AdmissionPlugin) Reset() { *m = AdmissionPlugin{} }
+func (*AdmissionPlugin) ProtoMessage() {}
+func (*AdmissionPlugin) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{2}
+}
+func (m *AdmissionPlugin) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AdmissionPlugin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AdmissionPlugin) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AdmissionPlugin.Merge(m, src)
+}
+func (m *AdmissionPlugin) XXX_Size() int {
+ return m.Size()
+}
+func (m *AdmissionPlugin) XXX_DiscardUnknown() {
+ xxx_messageInfo_AdmissionPlugin.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdmissionPlugin proto.InternalMessageInfo
+
+func (m *Alerting) Reset() { *m = Alerting{} }
+func (*Alerting) ProtoMessage() {}
+func (*Alerting) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{3}
+}
+func (m *Alerting) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Alerting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Alerting) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Alerting.Merge(m, src)
+}
+func (m *Alerting) XXX_Size() int {
+ return m.Size()
+}
+func (m *Alerting) XXX_DiscardUnknown() {
+ xxx_messageInfo_Alerting.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Alerting proto.InternalMessageInfo
+
+func (m *AuditConfig) Reset() { *m = AuditConfig{} }
+func (*AuditConfig) ProtoMessage() {}
+func (*AuditConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{4}
+}
+func (m *AuditConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuditConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AuditConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuditConfig.Merge(m, src)
+}
+func (m *AuditConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuditConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuditConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuditConfig proto.InternalMessageInfo
+
+func (m *AuditPolicy) Reset() { *m = AuditPolicy{} }
+func (*AuditPolicy) ProtoMessage() {}
+func (*AuditPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{5}
+}
+func (m *AuditPolicy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuditPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AuditPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuditPolicy.Merge(m, src)
+}
+func (m *AuditPolicy) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuditPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuditPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuditPolicy proto.InternalMessageInfo
+
+func (m *AvailabilityZone) Reset() { *m = AvailabilityZone{} }
+func (*AvailabilityZone) ProtoMessage() {}
+func (*AvailabilityZone) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{6}
+}
+func (m *AvailabilityZone) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AvailabilityZone) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AvailabilityZone) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AvailabilityZone.Merge(m, src)
+}
+func (m *AvailabilityZone) XXX_Size() int {
+ return m.Size()
+}
+func (m *AvailabilityZone) XXX_DiscardUnknown() {
+ xxx_messageInfo_AvailabilityZone.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AvailabilityZone proto.InternalMessageInfo
+
+func (m *BackupBucket) Reset() { *m = BackupBucket{} }
+func (*BackupBucket) ProtoMessage() {}
+func (*BackupBucket) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{7}
+}
+func (m *BackupBucket) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupBucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupBucket) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupBucket.Merge(m, src)
+}
+func (m *BackupBucket) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupBucket) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupBucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupBucket proto.InternalMessageInfo
+
+func (m *BackupBucketList) Reset() { *m = BackupBucketList{} }
+func (*BackupBucketList) ProtoMessage() {}
+func (*BackupBucketList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{8}
+}
+func (m *BackupBucketList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupBucketList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupBucketList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupBucketList.Merge(m, src)
+}
+func (m *BackupBucketList) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupBucketList) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupBucketList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupBucketList proto.InternalMessageInfo
+
+func (m *BackupBucketProvider) Reset() { *m = BackupBucketProvider{} }
+func (*BackupBucketProvider) ProtoMessage() {}
+func (*BackupBucketProvider) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{9}
+}
+func (m *BackupBucketProvider) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupBucketProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupBucketProvider) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupBucketProvider.Merge(m, src)
+}
+func (m *BackupBucketProvider) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupBucketProvider) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupBucketProvider.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupBucketProvider proto.InternalMessageInfo
+
+func (m *BackupBucketSpec) Reset() { *m = BackupBucketSpec{} }
+func (*BackupBucketSpec) ProtoMessage() {}
+func (*BackupBucketSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{10}
+}
+func (m *BackupBucketSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupBucketSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupBucketSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupBucketSpec.Merge(m, src)
+}
+func (m *BackupBucketSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupBucketSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupBucketSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupBucketSpec proto.InternalMessageInfo
+
+func (m *BackupBucketStatus) Reset() { *m = BackupBucketStatus{} }
+func (*BackupBucketStatus) ProtoMessage() {}
+func (*BackupBucketStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{11}
+}
+func (m *BackupBucketStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupBucketStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupBucketStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupBucketStatus.Merge(m, src)
+}
+func (m *BackupBucketStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupBucketStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupBucketStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupBucketStatus proto.InternalMessageInfo
+
+func (m *BackupEntry) Reset() { *m = BackupEntry{} }
+func (*BackupEntry) ProtoMessage() {}
+func (*BackupEntry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{12}
+}
+func (m *BackupEntry) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupEntry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupEntry.Merge(m, src)
+}
+func (m *BackupEntry) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupEntry) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupEntry proto.InternalMessageInfo
+
+func (m *BackupEntryList) Reset() { *m = BackupEntryList{} }
+func (*BackupEntryList) ProtoMessage() {}
+func (*BackupEntryList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{13}
+}
+func (m *BackupEntryList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupEntryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupEntryList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupEntryList.Merge(m, src)
+}
+func (m *BackupEntryList) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupEntryList) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupEntryList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupEntryList proto.InternalMessageInfo
+
+func (m *BackupEntrySpec) Reset() { *m = BackupEntrySpec{} }
+func (*BackupEntrySpec) ProtoMessage() {}
+func (*BackupEntrySpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{14}
+}
+func (m *BackupEntrySpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupEntrySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupEntrySpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupEntrySpec.Merge(m, src)
+}
+func (m *BackupEntrySpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupEntrySpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupEntrySpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupEntrySpec proto.InternalMessageInfo
+
+func (m *BackupEntryStatus) Reset() { *m = BackupEntryStatus{} }
+func (*BackupEntryStatus) ProtoMessage() {}
+func (*BackupEntryStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{15}
+}
+func (m *BackupEntryStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BackupEntryStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BackupEntryStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BackupEntryStatus.Merge(m, src)
+}
+func (m *BackupEntryStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *BackupEntryStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_BackupEntryStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BackupEntryStatus proto.InternalMessageInfo
+
+func (m *CRI) Reset() { *m = CRI{} }
+func (*CRI) ProtoMessage() {}
+func (*CRI) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{16}
+}
+func (m *CRI) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CRI) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CRI) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CRI.Merge(m, src)
+}
+func (m *CRI) XXX_Size() int {
+ return m.Size()
+}
+func (m *CRI) XXX_DiscardUnknown() {
+ xxx_messageInfo_CRI.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CRI proto.InternalMessageInfo
+
+func (m *CloudInfo) Reset() { *m = CloudInfo{} }
+func (*CloudInfo) ProtoMessage() {}
+func (*CloudInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{17}
+}
+func (m *CloudInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CloudInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CloudInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudInfo.Merge(m, src)
+}
+func (m *CloudInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *CloudInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudInfo proto.InternalMessageInfo
+
+func (m *CloudProfile) Reset() { *m = CloudProfile{} }
+func (*CloudProfile) ProtoMessage() {}
+func (*CloudProfile) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{18}
+}
+func (m *CloudProfile) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CloudProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CloudProfile) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudProfile.Merge(m, src)
+}
+func (m *CloudProfile) XXX_Size() int {
+ return m.Size()
+}
+func (m *CloudProfile) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudProfile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudProfile proto.InternalMessageInfo
+
+func (m *CloudProfileList) Reset() { *m = CloudProfileList{} }
+func (*CloudProfileList) ProtoMessage() {}
+func (*CloudProfileList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{19}
+}
+func (m *CloudProfileList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CloudProfileList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CloudProfileList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudProfileList.Merge(m, src)
+}
+func (m *CloudProfileList) XXX_Size() int {
+ return m.Size()
+}
+func (m *CloudProfileList) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudProfileList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudProfileList proto.InternalMessageInfo
+
+func (m *CloudProfileSpec) Reset() { *m = CloudProfileSpec{} }
+func (*CloudProfileSpec) ProtoMessage() {}
+func (*CloudProfileSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{20}
+}
+func (m *CloudProfileSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CloudProfileSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CloudProfileSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudProfileSpec.Merge(m, src)
+}
+func (m *CloudProfileSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *CloudProfileSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudProfileSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudProfileSpec proto.InternalMessageInfo
+
+func (m *ClusterAutoscaler) Reset() { *m = ClusterAutoscaler{} }
+func (*ClusterAutoscaler) ProtoMessage() {}
+func (*ClusterAutoscaler) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{21}
+}
+func (m *ClusterAutoscaler) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterAutoscaler) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterAutoscaler.Merge(m, src)
+}
+func (m *ClusterAutoscaler) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterAutoscaler) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterAutoscaler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterAutoscaler proto.InternalMessageInfo
+
+func (m *ClusterInfo) Reset() { *m = ClusterInfo{} }
+func (*ClusterInfo) ProtoMessage() {}
+func (*ClusterInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{22}
+}
+func (m *ClusterInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterInfo.Merge(m, src)
+}
+func (m *ClusterInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterInfo proto.InternalMessageInfo
+
+func (m *Condition) Reset() { *m = Condition{} }
+func (*Condition) ProtoMessage() {}
+func (*Condition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{23}
+}
+func (m *Condition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Condition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Condition.Merge(m, src)
+}
+func (m *Condition) XXX_Size() int {
+ return m.Size()
+}
+func (m *Condition) XXX_DiscardUnknown() {
+ xxx_messageInfo_Condition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Condition proto.InternalMessageInfo
+
+func (m *ContainerRuntime) Reset() { *m = ContainerRuntime{} }
+func (*ContainerRuntime) ProtoMessage() {}
+func (*ContainerRuntime) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{24}
+}
+func (m *ContainerRuntime) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ContainerRuntime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ContainerRuntime) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ContainerRuntime.Merge(m, src)
+}
+func (m *ContainerRuntime) XXX_Size() int {
+ return m.Size()
+}
+func (m *ContainerRuntime) XXX_DiscardUnknown() {
+ xxx_messageInfo_ContainerRuntime.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerRuntime proto.InternalMessageInfo
+
+func (m *ControllerDeployment) Reset() { *m = ControllerDeployment{} }
+func (*ControllerDeployment) ProtoMessage() {}
+func (*ControllerDeployment) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{25}
+}
+func (m *ControllerDeployment) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerDeployment) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerDeployment.Merge(m, src)
+}
+func (m *ControllerDeployment) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerDeployment) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerDeployment.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerDeployment proto.InternalMessageInfo
+
+func (m *ControllerInstallation) Reset() { *m = ControllerInstallation{} }
+func (*ControllerInstallation) ProtoMessage() {}
+func (*ControllerInstallation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{26}
+}
+func (m *ControllerInstallation) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerInstallation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerInstallation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerInstallation.Merge(m, src)
+}
+func (m *ControllerInstallation) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerInstallation) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerInstallation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerInstallation proto.InternalMessageInfo
+
+func (m *ControllerInstallationList) Reset() { *m = ControllerInstallationList{} }
+func (*ControllerInstallationList) ProtoMessage() {}
+func (*ControllerInstallationList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{27}
+}
+func (m *ControllerInstallationList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerInstallationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerInstallationList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerInstallationList.Merge(m, src)
+}
+func (m *ControllerInstallationList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerInstallationList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerInstallationList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerInstallationList proto.InternalMessageInfo
+
+func (m *ControllerInstallationSpec) Reset() { *m = ControllerInstallationSpec{} }
+func (*ControllerInstallationSpec) ProtoMessage() {}
+func (*ControllerInstallationSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{28}
+}
+func (m *ControllerInstallationSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerInstallationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerInstallationSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerInstallationSpec.Merge(m, src)
+}
+func (m *ControllerInstallationSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerInstallationSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerInstallationSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerInstallationSpec proto.InternalMessageInfo
+
+func (m *ControllerInstallationStatus) Reset() { *m = ControllerInstallationStatus{} }
+func (*ControllerInstallationStatus) ProtoMessage() {}
+func (*ControllerInstallationStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{29}
+}
+func (m *ControllerInstallationStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerInstallationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerInstallationStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerInstallationStatus.Merge(m, src)
+}
+func (m *ControllerInstallationStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerInstallationStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerInstallationStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerInstallationStatus proto.InternalMessageInfo
+
+func (m *ControllerRegistration) Reset() { *m = ControllerRegistration{} }
+func (*ControllerRegistration) ProtoMessage() {}
+func (*ControllerRegistration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{30}
+}
+func (m *ControllerRegistration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerRegistration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerRegistration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerRegistration.Merge(m, src)
+}
+func (m *ControllerRegistration) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerRegistration) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerRegistration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerRegistration proto.InternalMessageInfo
+
+func (m *ControllerRegistrationList) Reset() { *m = ControllerRegistrationList{} }
+func (*ControllerRegistrationList) ProtoMessage() {}
+func (*ControllerRegistrationList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{31}
+}
+func (m *ControllerRegistrationList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerRegistrationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerRegistrationList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerRegistrationList.Merge(m, src)
+}
+func (m *ControllerRegistrationList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerRegistrationList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerRegistrationList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerRegistrationList proto.InternalMessageInfo
+
+func (m *ControllerRegistrationSpec) Reset() { *m = ControllerRegistrationSpec{} }
+func (*ControllerRegistrationSpec) ProtoMessage() {}
+func (*ControllerRegistrationSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{32}
+}
+func (m *ControllerRegistrationSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerRegistrationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerRegistrationSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerRegistrationSpec.Merge(m, src)
+}
+func (m *ControllerRegistrationSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerRegistrationSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerRegistrationSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerRegistrationSpec proto.InternalMessageInfo
+
+func (m *ControllerResource) Reset() { *m = ControllerResource{} }
+func (*ControllerResource) ProtoMessage() {}
+func (*ControllerResource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{33}
+}
+func (m *ControllerResource) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ControllerResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ControllerResource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerResource.Merge(m, src)
+}
+func (m *ControllerResource) XXX_Size() int {
+ return m.Size()
+}
+func (m *ControllerResource) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerResource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerResource proto.InternalMessageInfo
+
+func (m *DNS) Reset() { *m = DNS{} }
+func (*DNS) ProtoMessage() {}
+func (*DNS) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{34}
+}
+func (m *DNS) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DNS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DNS) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DNS.Merge(m, src)
+}
+func (m *DNS) XXX_Size() int {
+ return m.Size()
+}
+func (m *DNS) XXX_DiscardUnknown() {
+ xxx_messageInfo_DNS.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DNS proto.InternalMessageInfo
+
+func (m *DNSIncludeExclude) Reset() { *m = DNSIncludeExclude{} }
+func (*DNSIncludeExclude) ProtoMessage() {}
+func (*DNSIncludeExclude) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{35}
+}
+func (m *DNSIncludeExclude) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DNSIncludeExclude) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DNSIncludeExclude) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DNSIncludeExclude.Merge(m, src)
+}
+func (m *DNSIncludeExclude) XXX_Size() int {
+ return m.Size()
+}
+func (m *DNSIncludeExclude) XXX_DiscardUnknown() {
+ xxx_messageInfo_DNSIncludeExclude.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DNSIncludeExclude proto.InternalMessageInfo
+
+func (m *DNSProvider) Reset() { *m = DNSProvider{} }
+func (*DNSProvider) ProtoMessage() {}
+func (*DNSProvider) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{36}
+}
+func (m *DNSProvider) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DNSProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DNSProvider) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DNSProvider.Merge(m, src)
+}
+func (m *DNSProvider) XXX_Size() int {
+ return m.Size()
+}
+func (m *DNSProvider) XXX_DiscardUnknown() {
+ xxx_messageInfo_DNSProvider.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DNSProvider proto.InternalMessageInfo
+
+func (m *DataVolume) Reset() { *m = DataVolume{} }
+func (*DataVolume) ProtoMessage() {}
+func (*DataVolume) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{37}
+}
+func (m *DataVolume) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DataVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DataVolume) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DataVolume.Merge(m, src)
+}
+func (m *DataVolume) XXX_Size() int {
+ return m.Size()
+}
+func (m *DataVolume) XXX_DiscardUnknown() {
+ xxx_messageInfo_DataVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DataVolume proto.InternalMessageInfo
+
+func (m *Endpoint) Reset() { *m = Endpoint{} }
+func (*Endpoint) ProtoMessage() {}
+func (*Endpoint) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{38}
+}
+func (m *Endpoint) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Endpoint) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Endpoint.Merge(m, src)
+}
+func (m *Endpoint) XXX_Size() int {
+ return m.Size()
+}
+func (m *Endpoint) XXX_DiscardUnknown() {
+ xxx_messageInfo_Endpoint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Endpoint proto.InternalMessageInfo
+
+func (m *ExpirableVersion) Reset() { *m = ExpirableVersion{} }
+func (*ExpirableVersion) ProtoMessage() {}
+func (*ExpirableVersion) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{39}
+}
+func (m *ExpirableVersion) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ExpirableVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ExpirableVersion) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExpirableVersion.Merge(m, src)
+}
+func (m *ExpirableVersion) XXX_Size() int {
+ return m.Size()
+}
+func (m *ExpirableVersion) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExpirableVersion.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExpirableVersion proto.InternalMessageInfo
+
+func (m *Extension) Reset() { *m = Extension{} }
+func (*Extension) ProtoMessage() {}
+func (*Extension) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{40}
+}
+func (m *Extension) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Extension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Extension) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Extension.Merge(m, src)
+}
+func (m *Extension) XXX_Size() int {
+ return m.Size()
+}
+func (m *Extension) XXX_DiscardUnknown() {
+ xxx_messageInfo_Extension.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Extension proto.InternalMessageInfo
+
+func (m *Gardener) Reset() { *m = Gardener{} }
+func (*Gardener) ProtoMessage() {}
+func (*Gardener) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{41}
+}
+func (m *Gardener) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Gardener) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Gardener) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Gardener.Merge(m, src)
+}
+func (m *Gardener) XXX_Size() int {
+ return m.Size()
+}
+func (m *Gardener) XXX_DiscardUnknown() {
+ xxx_messageInfo_Gardener.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Gardener proto.InternalMessageInfo
+
+func (m *Hibernation) Reset() { *m = Hibernation{} }
+func (*Hibernation) ProtoMessage() {}
+func (*Hibernation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{42}
+}
+func (m *Hibernation) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Hibernation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Hibernation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Hibernation.Merge(m, src)
+}
+func (m *Hibernation) XXX_Size() int {
+ return m.Size()
+}
+func (m *Hibernation) XXX_DiscardUnknown() {
+ xxx_messageInfo_Hibernation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Hibernation proto.InternalMessageInfo
+
+func (m *HibernationSchedule) Reset() { *m = HibernationSchedule{} }
+func (*HibernationSchedule) ProtoMessage() {}
+func (*HibernationSchedule) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{43}
+}
+func (m *HibernationSchedule) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HibernationSchedule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *HibernationSchedule) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HibernationSchedule.Merge(m, src)
+}
+func (m *HibernationSchedule) XXX_Size() int {
+ return m.Size()
+}
+func (m *HibernationSchedule) XXX_DiscardUnknown() {
+ xxx_messageInfo_HibernationSchedule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HibernationSchedule proto.InternalMessageInfo
+
+func (m *HorizontalPodAutoscalerConfig) Reset() { *m = HorizontalPodAutoscalerConfig{} }
+func (*HorizontalPodAutoscalerConfig) ProtoMessage() {}
+func (*HorizontalPodAutoscalerConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{44}
+}
+func (m *HorizontalPodAutoscalerConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HorizontalPodAutoscalerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *HorizontalPodAutoscalerConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HorizontalPodAutoscalerConfig.Merge(m, src)
+}
+func (m *HorizontalPodAutoscalerConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *HorizontalPodAutoscalerConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_HorizontalPodAutoscalerConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HorizontalPodAutoscalerConfig proto.InternalMessageInfo
+
+func (m *Ingress) Reset() { *m = Ingress{} }
+func (*Ingress) ProtoMessage() {}
+func (*Ingress) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{45}
+}
+func (m *Ingress) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Ingress) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Ingress.Merge(m, src)
+}
+func (m *Ingress) XXX_Size() int {
+ return m.Size()
+}
+func (m *Ingress) XXX_DiscardUnknown() {
+ xxx_messageInfo_Ingress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Ingress proto.InternalMessageInfo
+
+func (m *IngressController) Reset() { *m = IngressController{} }
+func (*IngressController) ProtoMessage() {}
+func (*IngressController) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{46}
+}
+func (m *IngressController) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressController) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressController) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressController.Merge(m, src)
+}
+func (m *IngressController) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressController) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressController.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressController proto.InternalMessageInfo
+
+func (m *KubeAPIServerConfig) Reset() { *m = KubeAPIServerConfig{} }
+func (*KubeAPIServerConfig) ProtoMessage() {}
+func (*KubeAPIServerConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{47}
+}
+func (m *KubeAPIServerConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeAPIServerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeAPIServerConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeAPIServerConfig.Merge(m, src)
+}
+func (m *KubeAPIServerConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeAPIServerConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeAPIServerConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeAPIServerConfig proto.InternalMessageInfo
+
+func (m *KubeAPIServerRequests) Reset() { *m = KubeAPIServerRequests{} }
+func (*KubeAPIServerRequests) ProtoMessage() {}
+func (*KubeAPIServerRequests) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{48}
+}
+func (m *KubeAPIServerRequests) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeAPIServerRequests) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeAPIServerRequests) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeAPIServerRequests.Merge(m, src)
+}
+func (m *KubeAPIServerRequests) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeAPIServerRequests) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeAPIServerRequests.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeAPIServerRequests proto.InternalMessageInfo
+
+func (m *KubeControllerManagerConfig) Reset() { *m = KubeControllerManagerConfig{} }
+func (*KubeControllerManagerConfig) ProtoMessage() {}
+func (*KubeControllerManagerConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{49}
+}
+func (m *KubeControllerManagerConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeControllerManagerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeControllerManagerConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeControllerManagerConfig.Merge(m, src)
+}
+func (m *KubeControllerManagerConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeControllerManagerConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeControllerManagerConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeControllerManagerConfig proto.InternalMessageInfo
+
+func (m *KubeProxyConfig) Reset() { *m = KubeProxyConfig{} }
+func (*KubeProxyConfig) ProtoMessage() {}
+func (*KubeProxyConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{50}
+}
+func (m *KubeProxyConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeProxyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeProxyConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeProxyConfig.Merge(m, src)
+}
+func (m *KubeProxyConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeProxyConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeProxyConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeProxyConfig proto.InternalMessageInfo
+
+func (m *KubeSchedulerConfig) Reset() { *m = KubeSchedulerConfig{} }
+func (*KubeSchedulerConfig) ProtoMessage() {}
+func (*KubeSchedulerConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{51}
+}
+func (m *KubeSchedulerConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeSchedulerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeSchedulerConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeSchedulerConfig.Merge(m, src)
+}
+func (m *KubeSchedulerConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeSchedulerConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeSchedulerConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeSchedulerConfig proto.InternalMessageInfo
+
+func (m *KubeletConfig) Reset() { *m = KubeletConfig{} }
+func (*KubeletConfig) ProtoMessage() {}
+func (*KubeletConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{52}
+}
+func (m *KubeletConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeletConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeletConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeletConfig.Merge(m, src)
+}
+func (m *KubeletConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeletConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeletConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeletConfig proto.InternalMessageInfo
+
+func (m *KubeletConfigEviction) Reset() { *m = KubeletConfigEviction{} }
+func (*KubeletConfigEviction) ProtoMessage() {}
+func (*KubeletConfigEviction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{53}
+}
+func (m *KubeletConfigEviction) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeletConfigEviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeletConfigEviction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeletConfigEviction.Merge(m, src)
+}
+func (m *KubeletConfigEviction) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeletConfigEviction) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeletConfigEviction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeletConfigEviction proto.InternalMessageInfo
+
+func (m *KubeletConfigEvictionMinimumReclaim) Reset() { *m = KubeletConfigEvictionMinimumReclaim{} }
+func (*KubeletConfigEvictionMinimumReclaim) ProtoMessage() {}
+func (*KubeletConfigEvictionMinimumReclaim) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{54}
+}
+func (m *KubeletConfigEvictionMinimumReclaim) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeletConfigEvictionMinimumReclaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeletConfigEvictionMinimumReclaim) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeletConfigEvictionMinimumReclaim.Merge(m, src)
+}
+func (m *KubeletConfigEvictionMinimumReclaim) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeletConfigEvictionMinimumReclaim) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeletConfigEvictionMinimumReclaim.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeletConfigEvictionMinimumReclaim proto.InternalMessageInfo
+
+func (m *KubeletConfigEvictionSoftGracePeriod) Reset() { *m = KubeletConfigEvictionSoftGracePeriod{} }
+func (*KubeletConfigEvictionSoftGracePeriod) ProtoMessage() {}
+func (*KubeletConfigEvictionSoftGracePeriod) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{55}
+}
+func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeletConfigEvictionSoftGracePeriod.Merge(m, src)
+}
+func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeletConfigEvictionSoftGracePeriod) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeletConfigEvictionSoftGracePeriod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeletConfigEvictionSoftGracePeriod proto.InternalMessageInfo
+
+func (m *KubeletConfigReserved) Reset() { *m = KubeletConfigReserved{} }
+func (*KubeletConfigReserved) ProtoMessage() {}
+func (*KubeletConfigReserved) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{56}
+}
+func (m *KubeletConfigReserved) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubeletConfigReserved) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubeletConfigReserved) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubeletConfigReserved.Merge(m, src)
+}
+func (m *KubeletConfigReserved) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubeletConfigReserved) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubeletConfigReserved.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubeletConfigReserved proto.InternalMessageInfo
+
+func (m *Kubernetes) Reset() { *m = Kubernetes{} }
+func (*Kubernetes) ProtoMessage() {}
+func (*Kubernetes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{57}
+}
+func (m *Kubernetes) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Kubernetes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Kubernetes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Kubernetes.Merge(m, src)
+}
+func (m *Kubernetes) XXX_Size() int {
+ return m.Size()
+}
+func (m *Kubernetes) XXX_DiscardUnknown() {
+ xxx_messageInfo_Kubernetes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Kubernetes proto.InternalMessageInfo
+
+func (m *KubernetesConfig) Reset() { *m = KubernetesConfig{} }
+func (*KubernetesConfig) ProtoMessage() {}
+func (*KubernetesConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{58}
+}
+func (m *KubernetesConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubernetesConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubernetesConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubernetesConfig.Merge(m, src)
+}
+func (m *KubernetesConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubernetesConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubernetesConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubernetesConfig proto.InternalMessageInfo
+
+func (m *KubernetesDashboard) Reset() { *m = KubernetesDashboard{} }
+func (*KubernetesDashboard) ProtoMessage() {}
+func (*KubernetesDashboard) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{59}
+}
+func (m *KubernetesDashboard) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubernetesDashboard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubernetesDashboard) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubernetesDashboard.Merge(m, src)
+}
+func (m *KubernetesDashboard) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubernetesDashboard) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubernetesDashboard.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubernetesDashboard proto.InternalMessageInfo
+
+func (m *KubernetesInfo) Reset() { *m = KubernetesInfo{} }
+func (*KubernetesInfo) ProtoMessage() {}
+func (*KubernetesInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{60}
+}
+func (m *KubernetesInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubernetesInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubernetesInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubernetesInfo.Merge(m, src)
+}
+func (m *KubernetesInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubernetesInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubernetesInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubernetesInfo proto.InternalMessageInfo
+
+func (m *KubernetesSettings) Reset() { *m = KubernetesSettings{} }
+func (*KubernetesSettings) ProtoMessage() {}
+func (*KubernetesSettings) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{61}
+}
+func (m *KubernetesSettings) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KubernetesSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KubernetesSettings) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubernetesSettings.Merge(m, src)
+}
+func (m *KubernetesSettings) XXX_Size() int {
+ return m.Size()
+}
+func (m *KubernetesSettings) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubernetesSettings.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubernetesSettings proto.InternalMessageInfo
+
+func (m *LastError) Reset() { *m = LastError{} }
+func (*LastError) ProtoMessage() {}
+func (*LastError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{62}
+}
+func (m *LastError) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LastError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *LastError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LastError.Merge(m, src)
+}
+func (m *LastError) XXX_Size() int {
+ return m.Size()
+}
+func (m *LastError) XXX_DiscardUnknown() {
+ xxx_messageInfo_LastError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LastError proto.InternalMessageInfo
+
+func (m *LastOperation) Reset() { *m = LastOperation{} }
+func (*LastOperation) ProtoMessage() {}
+func (*LastOperation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{63}
+}
+func (m *LastOperation) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LastOperation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *LastOperation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LastOperation.Merge(m, src)
+}
+func (m *LastOperation) XXX_Size() int {
+ return m.Size()
+}
+func (m *LastOperation) XXX_DiscardUnknown() {
+ xxx_messageInfo_LastOperation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LastOperation proto.InternalMessageInfo
+
+func (m *Machine) Reset() { *m = Machine{} }
+func (*Machine) ProtoMessage() {}
+func (*Machine) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{64}
+}
+func (m *Machine) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Machine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Machine) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Machine.Merge(m, src)
+}
+func (m *Machine) XXX_Size() int {
+ return m.Size()
+}
+func (m *Machine) XXX_DiscardUnknown() {
+ xxx_messageInfo_Machine.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Machine proto.InternalMessageInfo
+
+func (m *MachineControllerManagerSettings) Reset() { *m = MachineControllerManagerSettings{} }
+func (*MachineControllerManagerSettings) ProtoMessage() {}
+func (*MachineControllerManagerSettings) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{65}
+}
+func (m *MachineControllerManagerSettings) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MachineControllerManagerSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MachineControllerManagerSettings) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MachineControllerManagerSettings.Merge(m, src)
+}
+func (m *MachineControllerManagerSettings) XXX_Size() int {
+ return m.Size()
+}
+func (m *MachineControllerManagerSettings) XXX_DiscardUnknown() {
+ xxx_messageInfo_MachineControllerManagerSettings.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MachineControllerManagerSettings proto.InternalMessageInfo
+
+func (m *MachineImage) Reset() { *m = MachineImage{} }
+func (*MachineImage) ProtoMessage() {}
+func (*MachineImage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{66}
+}
+func (m *MachineImage) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MachineImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MachineImage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MachineImage.Merge(m, src)
+}
+func (m *MachineImage) XXX_Size() int {
+ return m.Size()
+}
+func (m *MachineImage) XXX_DiscardUnknown() {
+ xxx_messageInfo_MachineImage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MachineImage proto.InternalMessageInfo
+
+func (m *MachineImageVersion) Reset() { *m = MachineImageVersion{} }
+func (*MachineImageVersion) ProtoMessage() {}
+func (*MachineImageVersion) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{67}
+}
+func (m *MachineImageVersion) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MachineImageVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MachineImageVersion) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MachineImageVersion.Merge(m, src)
+}
+func (m *MachineImageVersion) XXX_Size() int {
+ return m.Size()
+}
+func (m *MachineImageVersion) XXX_DiscardUnknown() {
+ xxx_messageInfo_MachineImageVersion.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MachineImageVersion proto.InternalMessageInfo
+
+func (m *MachineType) Reset() { *m = MachineType{} }
+func (*MachineType) ProtoMessage() {}
+func (*MachineType) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{68}
+}
+func (m *MachineType) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MachineType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MachineType) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MachineType.Merge(m, src)
+}
+func (m *MachineType) XXX_Size() int {
+ return m.Size()
+}
+func (m *MachineType) XXX_DiscardUnknown() {
+ xxx_messageInfo_MachineType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MachineType proto.InternalMessageInfo
+
+func (m *MachineTypeStorage) Reset() { *m = MachineTypeStorage{} }
+func (*MachineTypeStorage) ProtoMessage() {}
+func (*MachineTypeStorage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{69}
+}
+func (m *MachineTypeStorage) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MachineTypeStorage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MachineTypeStorage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MachineTypeStorage.Merge(m, src)
+}
+func (m *MachineTypeStorage) XXX_Size() int {
+ return m.Size()
+}
+func (m *MachineTypeStorage) XXX_DiscardUnknown() {
+ xxx_messageInfo_MachineTypeStorage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MachineTypeStorage proto.InternalMessageInfo
+
+func (m *Maintenance) Reset() { *m = Maintenance{} }
+func (*Maintenance) ProtoMessage() {}
+func (*Maintenance) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{70}
+}
+func (m *Maintenance) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Maintenance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Maintenance) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Maintenance.Merge(m, src)
+}
+func (m *Maintenance) XXX_Size() int {
+ return m.Size()
+}
+func (m *Maintenance) XXX_DiscardUnknown() {
+ xxx_messageInfo_Maintenance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Maintenance proto.InternalMessageInfo
+
+func (m *MaintenanceAutoUpdate) Reset() { *m = MaintenanceAutoUpdate{} }
+func (*MaintenanceAutoUpdate) ProtoMessage() {}
+func (*MaintenanceAutoUpdate) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{71}
+}
+func (m *MaintenanceAutoUpdate) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MaintenanceAutoUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MaintenanceAutoUpdate) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MaintenanceAutoUpdate.Merge(m, src)
+}
+func (m *MaintenanceAutoUpdate) XXX_Size() int {
+ return m.Size()
+}
+func (m *MaintenanceAutoUpdate) XXX_DiscardUnknown() {
+ xxx_messageInfo_MaintenanceAutoUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MaintenanceAutoUpdate proto.InternalMessageInfo
+
+func (m *MaintenanceTimeWindow) Reset() { *m = MaintenanceTimeWindow{} }
+func (*MaintenanceTimeWindow) ProtoMessage() {}
+func (*MaintenanceTimeWindow) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{72}
+}
+func (m *MaintenanceTimeWindow) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MaintenanceTimeWindow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MaintenanceTimeWindow) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MaintenanceTimeWindow.Merge(m, src)
+}
+func (m *MaintenanceTimeWindow) XXX_Size() int {
+ return m.Size()
+}
+func (m *MaintenanceTimeWindow) XXX_DiscardUnknown() {
+ xxx_messageInfo_MaintenanceTimeWindow.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MaintenanceTimeWindow proto.InternalMessageInfo
+
+func (m *Monitoring) Reset() { *m = Monitoring{} }
+func (*Monitoring) ProtoMessage() {}
+func (*Monitoring) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{73}
+}
+func (m *Monitoring) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Monitoring) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Monitoring) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Monitoring.Merge(m, src)
+}
+func (m *Monitoring) XXX_Size() int {
+ return m.Size()
+}
+func (m *Monitoring) XXX_DiscardUnknown() {
+ xxx_messageInfo_Monitoring.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Monitoring proto.InternalMessageInfo
+
+func (m *NamedResourceReference) Reset() { *m = NamedResourceReference{} }
+func (*NamedResourceReference) ProtoMessage() {}
+func (*NamedResourceReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{74}
+}
+func (m *NamedResourceReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NamedResourceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NamedResourceReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedResourceReference.Merge(m, src)
+}
+func (m *NamedResourceReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *NamedResourceReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedResourceReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedResourceReference proto.InternalMessageInfo
+
+func (m *Networking) Reset() { *m = Networking{} }
+func (*Networking) ProtoMessage() {}
+func (*Networking) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{75}
+}
+func (m *Networking) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Networking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Networking) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Networking.Merge(m, src)
+}
+func (m *Networking) XXX_Size() int {
+ return m.Size()
+}
+func (m *Networking) XXX_DiscardUnknown() {
+ xxx_messageInfo_Networking.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Networking proto.InternalMessageInfo
+
+func (m *NginxIngress) Reset() { *m = NginxIngress{} }
+func (*NginxIngress) ProtoMessage() {}
+func (*NginxIngress) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{76}
+}
+func (m *NginxIngress) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NginxIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NginxIngress) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NginxIngress.Merge(m, src)
+}
+func (m *NginxIngress) XXX_Size() int {
+ return m.Size()
+}
+func (m *NginxIngress) XXX_DiscardUnknown() {
+ xxx_messageInfo_NginxIngress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NginxIngress proto.InternalMessageInfo
+
+func (m *OIDCConfig) Reset() { *m = OIDCConfig{} }
+func (*OIDCConfig) ProtoMessage() {}
+func (*OIDCConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{77}
+}
+func (m *OIDCConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OIDCConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OIDCConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OIDCConfig.Merge(m, src)
+}
+func (m *OIDCConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *OIDCConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_OIDCConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OIDCConfig proto.InternalMessageInfo
+
+func (m *OpenIDConnectClientAuthentication) Reset() { *m = OpenIDConnectClientAuthentication{} }
+func (*OpenIDConnectClientAuthentication) ProtoMessage() {}
+func (*OpenIDConnectClientAuthentication) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{78}
+}
+func (m *OpenIDConnectClientAuthentication) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OpenIDConnectClientAuthentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OpenIDConnectClientAuthentication) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OpenIDConnectClientAuthentication.Merge(m, src)
+}
+func (m *OpenIDConnectClientAuthentication) XXX_Size() int {
+ return m.Size()
+}
+func (m *OpenIDConnectClientAuthentication) XXX_DiscardUnknown() {
+ xxx_messageInfo_OpenIDConnectClientAuthentication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OpenIDConnectClientAuthentication proto.InternalMessageInfo
+
+func (m *Plant) Reset() { *m = Plant{} }
+func (*Plant) ProtoMessage() {}
+func (*Plant) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{79}
+}
+func (m *Plant) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Plant) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Plant) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Plant.Merge(m, src)
+}
+func (m *Plant) XXX_Size() int {
+ return m.Size()
+}
+func (m *Plant) XXX_DiscardUnknown() {
+ xxx_messageInfo_Plant.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Plant proto.InternalMessageInfo
+
+func (m *PlantList) Reset() { *m = PlantList{} }
+func (*PlantList) ProtoMessage() {}
+func (*PlantList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{80}
+}
+func (m *PlantList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PlantList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PlantList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PlantList.Merge(m, src)
+}
+func (m *PlantList) XXX_Size() int {
+ return m.Size()
+}
+func (m *PlantList) XXX_DiscardUnknown() {
+ xxx_messageInfo_PlantList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PlantList proto.InternalMessageInfo
+
+func (m *PlantSpec) Reset() { *m = PlantSpec{} }
+func (*PlantSpec) ProtoMessage() {}
+func (*PlantSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{81}
+}
+func (m *PlantSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PlantSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PlantSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PlantSpec.Merge(m, src)
+}
+func (m *PlantSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *PlantSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_PlantSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PlantSpec proto.InternalMessageInfo
+
+func (m *PlantStatus) Reset() { *m = PlantStatus{} }
+func (*PlantStatus) ProtoMessage() {}
+func (*PlantStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{82}
+}
+func (m *PlantStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PlantStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PlantStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PlantStatus.Merge(m, src)
+}
+func (m *PlantStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *PlantStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_PlantStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PlantStatus proto.InternalMessageInfo
+
+func (m *Project) Reset() { *m = Project{} }
+func (*Project) ProtoMessage() {}
+func (*Project) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{83}
+}
+func (m *Project) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Project) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Project.Merge(m, src)
+}
+func (m *Project) XXX_Size() int {
+ return m.Size()
+}
+func (m *Project) XXX_DiscardUnknown() {
+ xxx_messageInfo_Project.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Project proto.InternalMessageInfo
+
+func (m *ProjectList) Reset() { *m = ProjectList{} }
+func (*ProjectList) ProtoMessage() {}
+func (*ProjectList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{84}
+}
+func (m *ProjectList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectList.Merge(m, src)
+}
+func (m *ProjectList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectList proto.InternalMessageInfo
+
+func (m *ProjectMember) Reset() { *m = ProjectMember{} }
+func (*ProjectMember) ProtoMessage() {}
+func (*ProjectMember) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{85}
+}
+func (m *ProjectMember) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectMember) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectMember.Merge(m, src)
+}
+func (m *ProjectMember) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectMember) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectMember.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectMember proto.InternalMessageInfo
+
+func (m *ProjectSpec) Reset() { *m = ProjectSpec{} }
+func (*ProjectSpec) ProtoMessage() {}
+func (*ProjectSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{86}
+}
+func (m *ProjectSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectSpec.Merge(m, src)
+}
+func (m *ProjectSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectSpec proto.InternalMessageInfo
+
+func (m *ProjectStatus) Reset() { *m = ProjectStatus{} }
+func (*ProjectStatus) ProtoMessage() {}
+func (*ProjectStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{87}
+}
+func (m *ProjectStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectStatus.Merge(m, src)
+}
+func (m *ProjectStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectStatus proto.InternalMessageInfo
+
+func (m *ProjectTolerations) Reset() { *m = ProjectTolerations{} }
+func (*ProjectTolerations) ProtoMessage() {}
+func (*ProjectTolerations) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{88}
+}
+func (m *ProjectTolerations) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectTolerations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectTolerations) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectTolerations.Merge(m, src)
+}
+func (m *ProjectTolerations) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectTolerations) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectTolerations.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectTolerations proto.InternalMessageInfo
+
+func (m *Provider) Reset() { *m = Provider{} }
+func (*Provider) ProtoMessage() {}
+func (*Provider) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{89}
+}
+func (m *Provider) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Provider) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Provider.Merge(m, src)
+}
+func (m *Provider) XXX_Size() int {
+ return m.Size()
+}
+func (m *Provider) XXX_DiscardUnknown() {
+ xxx_messageInfo_Provider.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Provider proto.InternalMessageInfo
+
+func (m *Quota) Reset() { *m = Quota{} }
+func (*Quota) ProtoMessage() {}
+func (*Quota) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{90}
+}
+func (m *Quota) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Quota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Quota) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Quota.Merge(m, src)
+}
+func (m *Quota) XXX_Size() int {
+ return m.Size()
+}
+func (m *Quota) XXX_DiscardUnknown() {
+ xxx_messageInfo_Quota.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Quota proto.InternalMessageInfo
+
+func (m *QuotaList) Reset() { *m = QuotaList{} }
+func (*QuotaList) ProtoMessage() {}
+func (*QuotaList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{91}
+}
+func (m *QuotaList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *QuotaList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QuotaList.Merge(m, src)
+}
+func (m *QuotaList) XXX_Size() int {
+ return m.Size()
+}
+func (m *QuotaList) XXX_DiscardUnknown() {
+ xxx_messageInfo_QuotaList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QuotaList proto.InternalMessageInfo
+
+func (m *QuotaSpec) Reset() { *m = QuotaSpec{} }
+func (*QuotaSpec) ProtoMessage() {}
+func (*QuotaSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{92}
+}
+func (m *QuotaSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *QuotaSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QuotaSpec.Merge(m, src)
+}
+func (m *QuotaSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *QuotaSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_QuotaSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QuotaSpec proto.InternalMessageInfo
+
+func (m *Region) Reset() { *m = Region{} }
+func (*Region) ProtoMessage() {}
+func (*Region) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{93}
+}
+func (m *Region) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Region) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Region) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Region.Merge(m, src)
+}
+func (m *Region) XXX_Size() int {
+ return m.Size()
+}
+func (m *Region) XXX_DiscardUnknown() {
+ xxx_messageInfo_Region.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Region proto.InternalMessageInfo
+
+func (m *ResourceWatchCacheSize) Reset() { *m = ResourceWatchCacheSize{} }
+func (*ResourceWatchCacheSize) ProtoMessage() {}
+func (*ResourceWatchCacheSize) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{94}
+}
+func (m *ResourceWatchCacheSize) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceWatchCacheSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceWatchCacheSize) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceWatchCacheSize.Merge(m, src)
+}
+func (m *ResourceWatchCacheSize) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceWatchCacheSize) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceWatchCacheSize.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceWatchCacheSize proto.InternalMessageInfo
+
+func (m *SecretBinding) Reset() { *m = SecretBinding{} }
+func (*SecretBinding) ProtoMessage() {}
+func (*SecretBinding) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{95}
+}
+func (m *SecretBinding) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SecretBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SecretBinding) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SecretBinding.Merge(m, src)
+}
+func (m *SecretBinding) XXX_Size() int {
+ return m.Size()
+}
+func (m *SecretBinding) XXX_DiscardUnknown() {
+ xxx_messageInfo_SecretBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretBinding proto.InternalMessageInfo
+
+func (m *SecretBindingList) Reset() { *m = SecretBindingList{} }
+func (*SecretBindingList) ProtoMessage() {}
+func (*SecretBindingList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{96}
+}
+func (m *SecretBindingList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SecretBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SecretBindingList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SecretBindingList.Merge(m, src)
+}
+func (m *SecretBindingList) XXX_Size() int {
+ return m.Size()
+}
+func (m *SecretBindingList) XXX_DiscardUnknown() {
+ xxx_messageInfo_SecretBindingList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretBindingList proto.InternalMessageInfo
+
+func (m *Seed) Reset() { *m = Seed{} }
+func (*Seed) ProtoMessage() {}
+func (*Seed) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{97}
+}
+func (m *Seed) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Seed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Seed) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Seed.Merge(m, src)
+}
+func (m *Seed) XXX_Size() int {
+ return m.Size()
+}
+func (m *Seed) XXX_DiscardUnknown() {
+ xxx_messageInfo_Seed.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Seed proto.InternalMessageInfo
+
+func (m *SeedBackup) Reset() { *m = SeedBackup{} }
+func (*SeedBackup) ProtoMessage() {}
+func (*SeedBackup) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{98}
+}
+func (m *SeedBackup) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedBackup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedBackup) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedBackup.Merge(m, src)
+}
+func (m *SeedBackup) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedBackup) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedBackup.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedBackup proto.InternalMessageInfo
+
+func (m *SeedDNS) Reset() { *m = SeedDNS{} }
+func (*SeedDNS) ProtoMessage() {}
+func (*SeedDNS) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{99}
+}
+func (m *SeedDNS) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedDNS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedDNS) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedDNS.Merge(m, src)
+}
+func (m *SeedDNS) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedDNS) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedDNS.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedDNS proto.InternalMessageInfo
+
+func (m *SeedDNSProvider) Reset() { *m = SeedDNSProvider{} }
+func (*SeedDNSProvider) ProtoMessage() {}
+func (*SeedDNSProvider) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{100}
+}
+func (m *SeedDNSProvider) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedDNSProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedDNSProvider) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedDNSProvider.Merge(m, src)
+}
+func (m *SeedDNSProvider) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedDNSProvider) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedDNSProvider.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedDNSProvider proto.InternalMessageInfo
+
+func (m *SeedList) Reset() { *m = SeedList{} }
+func (*SeedList) ProtoMessage() {}
+func (*SeedList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{101}
+}
+func (m *SeedList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedList.Merge(m, src)
+}
+func (m *SeedList) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedList) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedList proto.InternalMessageInfo
+
+func (m *SeedNetworks) Reset() { *m = SeedNetworks{} }
+func (*SeedNetworks) ProtoMessage() {}
+func (*SeedNetworks) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{102}
+}
+func (m *SeedNetworks) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedNetworks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedNetworks) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedNetworks.Merge(m, src)
+}
+func (m *SeedNetworks) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedNetworks) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedNetworks.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedNetworks proto.InternalMessageInfo
+
+func (m *SeedProvider) Reset() { *m = SeedProvider{} }
+func (*SeedProvider) ProtoMessage() {}
+func (*SeedProvider) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{103}
+}
+func (m *SeedProvider) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedProvider) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedProvider.Merge(m, src)
+}
+func (m *SeedProvider) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedProvider) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedProvider.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedProvider proto.InternalMessageInfo
+
+func (m *SeedSelector) Reset() { *m = SeedSelector{} }
+func (*SeedSelector) ProtoMessage() {}
+func (*SeedSelector) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{104}
+}
+func (m *SeedSelector) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSelector) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSelector.Merge(m, src)
+}
+func (m *SeedSelector) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSelector) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSelector proto.InternalMessageInfo
+
+func (m *SeedSettingExcessCapacityReservation) Reset() { *m = SeedSettingExcessCapacityReservation{} }
+func (*SeedSettingExcessCapacityReservation) ProtoMessage() {}
+func (*SeedSettingExcessCapacityReservation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{105}
+}
+func (m *SeedSettingExcessCapacityReservation) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSettingExcessCapacityReservation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSettingExcessCapacityReservation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSettingExcessCapacityReservation.Merge(m, src)
+}
+func (m *SeedSettingExcessCapacityReservation) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSettingExcessCapacityReservation) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSettingExcessCapacityReservation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSettingExcessCapacityReservation proto.InternalMessageInfo
+
+func (m *SeedSettingLoadBalancerServices) Reset() { *m = SeedSettingLoadBalancerServices{} }
+func (*SeedSettingLoadBalancerServices) ProtoMessage() {}
+func (*SeedSettingLoadBalancerServices) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{106}
+}
+func (m *SeedSettingLoadBalancerServices) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSettingLoadBalancerServices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSettingLoadBalancerServices) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSettingLoadBalancerServices.Merge(m, src)
+}
+func (m *SeedSettingLoadBalancerServices) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSettingLoadBalancerServices) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSettingLoadBalancerServices.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSettingLoadBalancerServices proto.InternalMessageInfo
+
+func (m *SeedSettingScheduling) Reset() { *m = SeedSettingScheduling{} }
+func (*SeedSettingScheduling) ProtoMessage() {}
+func (*SeedSettingScheduling) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{107}
+}
+func (m *SeedSettingScheduling) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSettingScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSettingScheduling) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSettingScheduling.Merge(m, src)
+}
+func (m *SeedSettingScheduling) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSettingScheduling) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSettingScheduling.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSettingScheduling proto.InternalMessageInfo
+
+func (m *SeedSettingShootDNS) Reset() { *m = SeedSettingShootDNS{} }
+func (*SeedSettingShootDNS) ProtoMessage() {}
+func (*SeedSettingShootDNS) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{108}
+}
+func (m *SeedSettingShootDNS) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSettingShootDNS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSettingShootDNS) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSettingShootDNS.Merge(m, src)
+}
+func (m *SeedSettingShootDNS) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSettingShootDNS) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSettingShootDNS.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSettingShootDNS proto.InternalMessageInfo
+
+func (m *SeedSettingVerticalPodAutoscaler) Reset() { *m = SeedSettingVerticalPodAutoscaler{} }
+func (*SeedSettingVerticalPodAutoscaler) ProtoMessage() {}
+func (*SeedSettingVerticalPodAutoscaler) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{109}
+}
+func (m *SeedSettingVerticalPodAutoscaler) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSettingVerticalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSettingVerticalPodAutoscaler) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSettingVerticalPodAutoscaler.Merge(m, src)
+}
+func (m *SeedSettingVerticalPodAutoscaler) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSettingVerticalPodAutoscaler) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSettingVerticalPodAutoscaler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSettingVerticalPodAutoscaler proto.InternalMessageInfo
+
+func (m *SeedSettings) Reset() { *m = SeedSettings{} }
+func (*SeedSettings) ProtoMessage() {}
+func (*SeedSettings) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{110}
+}
+func (m *SeedSettings) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSettings) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSettings.Merge(m, src)
+}
+func (m *SeedSettings) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSettings) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSettings.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSettings proto.InternalMessageInfo
+
+func (m *SeedSpec) Reset() { *m = SeedSpec{} }
+func (*SeedSpec) ProtoMessage() {}
+func (*SeedSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{111}
+}
+func (m *SeedSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedSpec.Merge(m, src)
+}
+func (m *SeedSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedSpec proto.InternalMessageInfo
+
+func (m *SeedStatus) Reset() { *m = SeedStatus{} }
+func (*SeedStatus) ProtoMessage() {}
+func (*SeedStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{112}
+}
+func (m *SeedStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedStatus.Merge(m, src)
+}
+func (m *SeedStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedStatus proto.InternalMessageInfo
+
+func (m *SeedTaint) Reset() { *m = SeedTaint{} }
+func (*SeedTaint) ProtoMessage() {}
+func (*SeedTaint) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{113}
+}
+func (m *SeedTaint) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedTaint) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedTaint.Merge(m, src)
+}
+func (m *SeedTaint) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedTaint) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedTaint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedTaint proto.InternalMessageInfo
+
+func (m *SeedVolume) Reset() { *m = SeedVolume{} }
+func (*SeedVolume) ProtoMessage() {}
+func (*SeedVolume) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{114}
+}
+func (m *SeedVolume) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedVolume) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedVolume.Merge(m, src)
+}
+func (m *SeedVolume) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedVolume) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedVolume proto.InternalMessageInfo
+
+func (m *SeedVolumeProvider) Reset() { *m = SeedVolumeProvider{} }
+func (*SeedVolumeProvider) ProtoMessage() {}
+func (*SeedVolumeProvider) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{115}
+}
+func (m *SeedVolumeProvider) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SeedVolumeProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SeedVolumeProvider) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SeedVolumeProvider.Merge(m, src)
+}
+func (m *SeedVolumeProvider) XXX_Size() int {
+ return m.Size()
+}
+func (m *SeedVolumeProvider) XXX_DiscardUnknown() {
+ xxx_messageInfo_SeedVolumeProvider.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeedVolumeProvider proto.InternalMessageInfo
+
+func (m *ServiceAccountConfig) Reset() { *m = ServiceAccountConfig{} }
+func (*ServiceAccountConfig) ProtoMessage() {}
+func (*ServiceAccountConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{116}
+}
+func (m *ServiceAccountConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ServiceAccountConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ServiceAccountConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceAccountConfig.Merge(m, src)
+}
+func (m *ServiceAccountConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *ServiceAccountConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceAccountConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceAccountConfig proto.InternalMessageInfo
+
+func (m *Shoot) Reset() { *m = Shoot{} }
+func (*Shoot) ProtoMessage() {}
+func (*Shoot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{117}
+}
+func (m *Shoot) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Shoot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Shoot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Shoot.Merge(m, src)
+}
+func (m *Shoot) XXX_Size() int {
+ return m.Size()
+}
+func (m *Shoot) XXX_DiscardUnknown() {
+ xxx_messageInfo_Shoot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Shoot proto.InternalMessageInfo
+
+func (m *ShootList) Reset() { *m = ShootList{} }
+func (*ShootList) ProtoMessage() {}
+func (*ShootList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{118}
+}
+func (m *ShootList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootList.Merge(m, src)
+}
+func (m *ShootList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootList proto.InternalMessageInfo
+
+func (m *ShootMachineImage) Reset() { *m = ShootMachineImage{} }
+func (*ShootMachineImage) ProtoMessage() {}
+func (*ShootMachineImage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{119}
+}
+func (m *ShootMachineImage) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootMachineImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootMachineImage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootMachineImage.Merge(m, src)
+}
+func (m *ShootMachineImage) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootMachineImage) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootMachineImage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootMachineImage proto.InternalMessageInfo
+
+func (m *ShootNetworks) Reset() { *m = ShootNetworks{} }
+func (*ShootNetworks) ProtoMessage() {}
+func (*ShootNetworks) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{120}
+}
+func (m *ShootNetworks) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootNetworks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootNetworks) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootNetworks.Merge(m, src)
+}
+func (m *ShootNetworks) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootNetworks) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootNetworks.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootNetworks proto.InternalMessageInfo
+
+func (m *ShootSpec) Reset() { *m = ShootSpec{} }
+func (*ShootSpec) ProtoMessage() {}
+func (*ShootSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{121}
+}
+func (m *ShootSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootSpec.Merge(m, src)
+}
+func (m *ShootSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootSpec proto.InternalMessageInfo
+
+func (m *ShootStatus) Reset() { *m = ShootStatus{} }
+func (*ShootStatus) ProtoMessage() {}
+func (*ShootStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{122}
+}
+func (m *ShootStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ShootStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ShootStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShootStatus.Merge(m, src)
+}
+func (m *ShootStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ShootStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShootStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShootStatus proto.InternalMessageInfo
+
+func (m *Toleration) Reset() { *m = Toleration{} }
+func (*Toleration) ProtoMessage() {}
+func (*Toleration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{123}
+}
+func (m *Toleration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Toleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Toleration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Toleration.Merge(m, src)
+}
+func (m *Toleration) XXX_Size() int {
+ return m.Size()
+}
+func (m *Toleration) XXX_DiscardUnknown() {
+ xxx_messageInfo_Toleration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Toleration proto.InternalMessageInfo
+
+func (m *VerticalPodAutoscaler) Reset() { *m = VerticalPodAutoscaler{} }
+func (*VerticalPodAutoscaler) ProtoMessage() {}
+func (*VerticalPodAutoscaler) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{124}
+}
+func (m *VerticalPodAutoscaler) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *VerticalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *VerticalPodAutoscaler) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VerticalPodAutoscaler.Merge(m, src)
+}
+func (m *VerticalPodAutoscaler) XXX_Size() int {
+ return m.Size()
+}
+func (m *VerticalPodAutoscaler) XXX_DiscardUnknown() {
+ xxx_messageInfo_VerticalPodAutoscaler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VerticalPodAutoscaler proto.InternalMessageInfo
+
+func (m *Volume) Reset() { *m = Volume{} }
+func (*Volume) ProtoMessage() {}
+func (*Volume) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{125}
+}
+func (m *Volume) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Volume) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Volume.Merge(m, src)
+}
+func (m *Volume) XXX_Size() int {
+ return m.Size()
+}
+func (m *Volume) XXX_DiscardUnknown() {
+ xxx_messageInfo_Volume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Volume proto.InternalMessageInfo
+
+func (m *VolumeType) Reset() { *m = VolumeType{} }
+func (*VolumeType) ProtoMessage() {}
+func (*VolumeType) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{126}
+}
+func (m *VolumeType) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *VolumeType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *VolumeType) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VolumeType.Merge(m, src)
+}
+func (m *VolumeType) XXX_Size() int {
+ return m.Size()
+}
+func (m *VolumeType) XXX_DiscardUnknown() {
+ xxx_messageInfo_VolumeType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeType proto.InternalMessageInfo
+
+func (m *WatchCacheSizes) Reset() { *m = WatchCacheSizes{} }
+func (*WatchCacheSizes) ProtoMessage() {}
+func (*WatchCacheSizes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{127}
+}
+func (m *WatchCacheSizes) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WatchCacheSizes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *WatchCacheSizes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WatchCacheSizes.Merge(m, src)
+}
+func (m *WatchCacheSizes) XXX_Size() int {
+ return m.Size()
+}
+func (m *WatchCacheSizes) XXX_DiscardUnknown() {
+ xxx_messageInfo_WatchCacheSizes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchCacheSizes proto.InternalMessageInfo
+
+func (m *Worker) Reset() { *m = Worker{} }
+func (*Worker) ProtoMessage() {}
+func (*Worker) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{128}
+}
+func (m *Worker) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Worker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Worker) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Worker.Merge(m, src)
+}
+func (m *Worker) XXX_Size() int {
+ return m.Size()
+}
+func (m *Worker) XXX_DiscardUnknown() {
+ xxx_messageInfo_Worker.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Worker proto.InternalMessageInfo
+
+func (m *WorkerKubernetes) Reset() { *m = WorkerKubernetes{} }
+func (*WorkerKubernetes) ProtoMessage() {}
+func (*WorkerKubernetes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{129}
+}
+func (m *WorkerKubernetes) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WorkerKubernetes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *WorkerKubernetes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WorkerKubernetes.Merge(m, src)
+}
+func (m *WorkerKubernetes) XXX_Size() int {
+ return m.Size()
+}
+func (m *WorkerKubernetes) XXX_DiscardUnknown() {
+ xxx_messageInfo_WorkerKubernetes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WorkerKubernetes proto.InternalMessageInfo
+
+func (m *WorkerSystemComponents) Reset() { *m = WorkerSystemComponents{} }
+func (*WorkerSystemComponents) ProtoMessage() {}
+func (*WorkerSystemComponents) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ca37af0df9a5bbd2, []int{130}
+}
+func (m *WorkerSystemComponents) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WorkerSystemComponents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *WorkerSystemComponents) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WorkerSystemComponents.Merge(m, src)
+}
+func (m *WorkerSystemComponents) XXX_Size() int {
+ return m.Size()
+}
+func (m *WorkerSystemComponents) XXX_DiscardUnknown() {
+ xxx_messageInfo_WorkerSystemComponents.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WorkerSystemComponents proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*Addon)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Addon")
+ proto.RegisterType((*Addons)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Addons")
+ proto.RegisterType((*AdmissionPlugin)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.AdmissionPlugin")
+ proto.RegisterType((*Alerting)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Alerting")
+ proto.RegisterType((*AuditConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.AuditConfig")
+ proto.RegisterType((*AuditPolicy)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.AuditPolicy")
+ proto.RegisterType((*AvailabilityZone)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.AvailabilityZone")
+ proto.RegisterType((*BackupBucket)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.BackupBucket")
+ proto.RegisterType((*BackupBucketList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.BackupBucketList")
+ proto.RegisterType((*BackupBucketProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.BackupBucketProvider")
+ proto.RegisterType((*BackupBucketSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.BackupBucketSpec")
+ proto.RegisterType((*BackupBucketStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.BackupBucketStatus")
+ proto.RegisterType((*BackupEntry)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.BackupEntry")
+ proto.RegisterType((*BackupEntryList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.BackupEntryList")
+ proto.RegisterType((*BackupEntrySpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.BackupEntrySpec")
+ proto.RegisterType((*BackupEntryStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.BackupEntryStatus")
+ proto.RegisterType((*CRI)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.CRI")
+ proto.RegisterType((*CloudInfo)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.CloudInfo")
+ proto.RegisterType((*CloudProfile)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.CloudProfile")
+ proto.RegisterType((*CloudProfileList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.CloudProfileList")
+ proto.RegisterType((*CloudProfileSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.CloudProfileSpec")
+ proto.RegisterType((*ClusterAutoscaler)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ClusterAutoscaler")
+ proto.RegisterType((*ClusterInfo)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ClusterInfo")
+ proto.RegisterType((*Condition)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Condition")
+ proto.RegisterType((*ContainerRuntime)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ContainerRuntime")
+ proto.RegisterType((*ControllerDeployment)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ControllerDeployment")
+ proto.RegisterType((*ControllerInstallation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ControllerInstallation")
+ proto.RegisterType((*ControllerInstallationList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ControllerInstallationList")
+ proto.RegisterType((*ControllerInstallationSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ControllerInstallationSpec")
+ proto.RegisterType((*ControllerInstallationStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ControllerInstallationStatus")
+ proto.RegisterType((*ControllerRegistration)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ControllerRegistration")
+ proto.RegisterType((*ControllerRegistrationList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ControllerRegistrationList")
+ proto.RegisterType((*ControllerRegistrationSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ControllerRegistrationSpec")
+ proto.RegisterType((*ControllerResource)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ControllerResource")
+ proto.RegisterType((*DNS)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.DNS")
+ proto.RegisterType((*DNSIncludeExclude)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.DNSIncludeExclude")
+ proto.RegisterType((*DNSProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.DNSProvider")
+ proto.RegisterType((*DataVolume)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.DataVolume")
+ proto.RegisterType((*Endpoint)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Endpoint")
+ proto.RegisterType((*ExpirableVersion)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ExpirableVersion")
+ proto.RegisterType((*Extension)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Extension")
+ proto.RegisterType((*Gardener)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Gardener")
+ proto.RegisterType((*Hibernation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Hibernation")
+ proto.RegisterType((*HibernationSchedule)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.HibernationSchedule")
+ proto.RegisterType((*HorizontalPodAutoscalerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.HorizontalPodAutoscalerConfig")
+ proto.RegisterType((*Ingress)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Ingress")
+ proto.RegisterType((*IngressController)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.IngressController")
+ proto.RegisterType((*KubeAPIServerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeAPIServerConfig")
+ proto.RegisterMapType((map[string]bool)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeAPIServerConfig.RuntimeConfigEntry")
+ proto.RegisterType((*KubeAPIServerRequests)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeAPIServerRequests")
+ proto.RegisterType((*KubeControllerManagerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeControllerManagerConfig")
+ proto.RegisterType((*KubeProxyConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeProxyConfig")
+ proto.RegisterType((*KubeSchedulerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeSchedulerConfig")
+ proto.RegisterType((*KubeletConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeletConfig")
+ proto.RegisterType((*KubeletConfigEviction)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeletConfigEviction")
+ proto.RegisterType((*KubeletConfigEvictionMinimumReclaim)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeletConfigEvictionMinimumReclaim")
+ proto.RegisterType((*KubeletConfigEvictionSoftGracePeriod)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeletConfigEvictionSoftGracePeriod")
+ proto.RegisterType((*KubeletConfigReserved)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeletConfigReserved")
+ proto.RegisterType((*Kubernetes)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Kubernetes")
+ proto.RegisterType((*KubernetesConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubernetesConfig")
+ proto.RegisterMapType((map[string]bool)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubernetesConfig.FeatureGatesEntry")
+ proto.RegisterType((*KubernetesDashboard)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubernetesDashboard")
+ proto.RegisterType((*KubernetesInfo)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubernetesInfo")
+ proto.RegisterType((*KubernetesSettings)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubernetesSettings")
+ proto.RegisterType((*LastError)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.LastError")
+ proto.RegisterType((*LastOperation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.LastOperation")
+ proto.RegisterType((*Machine)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Machine")
+ proto.RegisterType((*MachineControllerManagerSettings)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MachineControllerManagerSettings")
+ proto.RegisterType((*MachineImage)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MachineImage")
+ proto.RegisterType((*MachineImageVersion)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MachineImageVersion")
+ proto.RegisterType((*MachineType)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MachineType")
+ proto.RegisterType((*MachineTypeStorage)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MachineTypeStorage")
+ proto.RegisterType((*Maintenance)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Maintenance")
+ proto.RegisterType((*MaintenanceAutoUpdate)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MaintenanceAutoUpdate")
+ proto.RegisterType((*MaintenanceTimeWindow)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MaintenanceTimeWindow")
+ proto.RegisterType((*Monitoring)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Monitoring")
+ proto.RegisterType((*NamedResourceReference)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.NamedResourceReference")
+ proto.RegisterType((*Networking)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Networking")
+ proto.RegisterType((*NginxIngress)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.NginxIngress")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.NginxIngress.ConfigEntry")
+ proto.RegisterType((*OIDCConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.OIDCConfig")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.OIDCConfig.RequiredClaimsEntry")
+ proto.RegisterType((*OpenIDConnectClientAuthentication)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.OpenIDConnectClientAuthentication")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.OpenIDConnectClientAuthentication.ExtraConfigEntry")
+ proto.RegisterType((*Plant)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Plant")
+ proto.RegisterType((*PlantList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.PlantList")
+ proto.RegisterType((*PlantSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.PlantSpec")
+ proto.RegisterType((*PlantStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.PlantStatus")
+ proto.RegisterType((*Project)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Project")
+ proto.RegisterType((*ProjectList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ProjectList")
+ proto.RegisterType((*ProjectMember)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ProjectMember")
+ proto.RegisterType((*ProjectSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ProjectSpec")
+ proto.RegisterType((*ProjectStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ProjectStatus")
+ proto.RegisterType((*ProjectTolerations)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ProjectTolerations")
+ proto.RegisterType((*Provider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Provider")
+ proto.RegisterType((*Quota)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Quota")
+ proto.RegisterType((*QuotaList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.QuotaList")
+ proto.RegisterType((*QuotaSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.QuotaSpec")
+ proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.QuotaSpec.MetricsEntry")
+ proto.RegisterType((*Region)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Region")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Region.LabelsEntry")
+ proto.RegisterType((*ResourceWatchCacheSize)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ResourceWatchCacheSize")
+ proto.RegisterType((*SecretBinding)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SecretBinding")
+ proto.RegisterType((*SecretBindingList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SecretBindingList")
+ proto.RegisterType((*Seed)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Seed")
+ proto.RegisterType((*SeedBackup)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedBackup")
+ proto.RegisterType((*SeedDNS)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedDNS")
+ proto.RegisterType((*SeedDNSProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedDNSProvider")
+ proto.RegisterType((*SeedList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedList")
+ proto.RegisterType((*SeedNetworks)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedNetworks")
+ proto.RegisterType((*SeedProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedProvider")
+ proto.RegisterType((*SeedSelector)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSelector")
+ proto.RegisterType((*SeedSettingExcessCapacityReservation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingExcessCapacityReservation")
+ proto.RegisterType((*SeedSettingLoadBalancerServices)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingLoadBalancerServices")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingLoadBalancerServices.AnnotationsEntry")
+ proto.RegisterType((*SeedSettingScheduling)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingScheduling")
+ proto.RegisterType((*SeedSettingShootDNS)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingShootDNS")
+ proto.RegisterType((*SeedSettingVerticalPodAutoscaler)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingVerticalPodAutoscaler")
+ proto.RegisterType((*SeedSettings)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettings")
+ proto.RegisterType((*SeedSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSpec")
+ proto.RegisterType((*SeedStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedStatus")
+ proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedStatus.AllocatableEntry")
+ proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedStatus.CapacityEntry")
+ proto.RegisterType((*SeedTaint)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedTaint")
+ proto.RegisterType((*SeedVolume)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedVolume")
+ proto.RegisterType((*SeedVolumeProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedVolumeProvider")
+ proto.RegisterType((*ServiceAccountConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ServiceAccountConfig")
+ proto.RegisterType((*Shoot)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Shoot")
+ proto.RegisterType((*ShootList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ShootList")
+ proto.RegisterType((*ShootMachineImage)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ShootMachineImage")
+ proto.RegisterType((*ShootNetworks)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ShootNetworks")
+ proto.RegisterType((*ShootSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ShootSpec")
+ proto.RegisterType((*ShootStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ShootStatus")
+ proto.RegisterType((*Toleration)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Toleration")
+ proto.RegisterType((*VerticalPodAutoscaler)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.VerticalPodAutoscaler")
+ proto.RegisterType((*Volume)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Volume")
+ proto.RegisterType((*VolumeType)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.VolumeType")
+ proto.RegisterType((*WatchCacheSizes)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.WatchCacheSizes")
+ proto.RegisterType((*Worker)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Worker")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Worker.AnnotationsEntry")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Worker.LabelsEntry")
+ proto.RegisterType((*WorkerKubernetes)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.WorkerKubernetes")
+ proto.RegisterType((*WorkerSystemComponents)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.WorkerSystemComponents")
+}
+
+func init() {
+ proto.RegisterFile("github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto", fileDescriptor_ca37af0df9a5bbd2)
+}
+
+var fileDescriptor_ca37af0df9a5bbd2 = []byte{
+ // 8855 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x7d, 0x6d, 0x8c, 0x24, 0xc7,
+ 0x75, 0x18, 0x7b, 0xf6, 0x6b, 0xf6, 0xcd, 0x7e, 0xd6, 0x7d, 0x70, 0xb9, 0x24, 0x6f, 0x4e, 0x2d,
+ 0x51, 0x20, 0x23, 0x69, 0xcf, 0xa4, 0x24, 0x53, 0x24, 0x45, 0x91, 0x3b, 0x33, 0x7b, 0x77, 0xa3,
+ 0xbb, 0xdd, 0x1b, 0xd6, 0xdc, 0x1e, 0x25, 0x3a, 0x61, 0xd4, 0xdb, 0x5d, 0x3b, 0xdb, 0xdc, 0x9e,
+ 0xee, 0x61, 0x77, 0xcf, 0xde, 0xee, 0xc9, 0x81, 0x14, 0x21, 0xb2, 0xf3, 0xe1, 0x38, 0x86, 0x63,
+ 0x45, 0x88, 0x95, 0xc0, 0x32, 0x82, 0xc4, 0x08, 0x1c, 0xc8, 0x0e, 0x12, 0x19, 0x09, 0x92, 0x00,
+ 0x86, 0x8d, 0x40, 0xb2, 0x91, 0x00, 0x42, 0x10, 0x21, 0x0a, 0x12, 0xaf, 0xa2, 0x0d, 0x20, 0x24,
+ 0x48, 0xfe, 0x19, 0xf9, 0x73, 0x30, 0x10, 0xa3, 0x3e, 0xba, 0xba, 0xba, 0xa7, 0x7b, 0x77, 0xb6,
+ 0x67, 0xf7, 0x48, 0xfe, 0xda, 0x9d, 0x7a, 0x55, 0xef, 0x55, 0x57, 0xbd, 0x7a, 0xf5, 0xde, 0xab,
+ 0x57, 0xaf, 0xa0, 0xd6, 0xb1, 0xc3, 0x9d, 0xfe, 0xd6, 0x8a, 0xe9, 0x75, 0xaf, 0x75, 0x0c, 0xdf,
+ 0x22, 0x2e, 0xf1, 0xe3, 0x7f, 0x7a, 0xbb, 0x9d, 0x6b, 0x46, 0xcf, 0x0e, 0xae, 0x99, 0x9e, 0x4f,
+ 0xae, 0xed, 0x3d, 0xbf, 0x45, 0x42, 0xe3, 0xf9, 0x6b, 0x1d, 0x0a, 0x33, 0x42, 0x62, 0xad, 0xf4,
+ 0x7c, 0x2f, 0xf4, 0xd0, 0x0b, 0x31, 0x8e, 0x95, 0xa8, 0x69, 0xfc, 0x4f, 0x6f, 0xb7, 0xb3, 0x42,
+ 0x71, 0xac, 0x50, 0x1c, 0x2b, 0x02, 0xc7, 0xf2, 0x27, 0x54, 0xba, 0x5e, 0xc7, 0xbb, 0xc6, 0x50,
+ 0x6d, 0xf5, 0xb7, 0xd9, 0x2f, 0xf6, 0x83, 0xfd, 0xc7, 0x49, 0x2c, 0x3f, 0xb7, 0xfb, 0x99, 0x60,
+ 0xc5, 0xf6, 0x68, 0x67, 0xae, 0x19, 0xfd, 0xd0, 0x0b, 0x4c, 0xc3, 0xb1, 0xdd, 0xce, 0xb5, 0xbd,
+ 0x81, 0xde, 0x2c, 0xeb, 0x4a, 0x55, 0xd1, 0xed, 0x63, 0xeb, 0xf8, 0x5b, 0x86, 0x99, 0x55, 0xe7,
+ 0x53, 0x71, 0x9d, 0xae, 0x61, 0xee, 0xd8, 0x2e, 0xf1, 0x0f, 0xa2, 0x01, 0xb9, 0xe6, 0x93, 0xc0,
+ 0xeb, 0xfb, 0x26, 0x39, 0x55, 0xab, 0xe0, 0x5a, 0x97, 0x84, 0x46, 0x16, 0xad, 0x6b, 0x79, 0xad,
+ 0xfc, 0xbe, 0x1b, 0xda, 0xdd, 0x41, 0x32, 0x3f, 0x7b, 0x52, 0x83, 0xc0, 0xdc, 0x21, 0x5d, 0x63,
+ 0xa0, 0xdd, 0x27, 0xf3, 0xda, 0xf5, 0x43, 0xdb, 0xb9, 0x66, 0xbb, 0x61, 0x10, 0xfa, 0xe9, 0x46,
+ 0xfa, 0x0b, 0x30, 0xb1, 0x6a, 0x59, 0x9e, 0x8b, 0x9e, 0x83, 0x29, 0xe2, 0x1a, 0x5b, 0x0e, 0xb1,
+ 0x96, 0xb4, 0xab, 0xda, 0xb3, 0xe5, 0xda, 0xfc, 0xf7, 0x0f, 0xab, 0x8f, 0x1d, 0x1d, 0x56, 0xa7,
+ 0xd6, 0x78, 0x31, 0x8e, 0xe0, 0xfa, 0x37, 0x4a, 0x30, 0xc9, 0x1a, 0x05, 0xe8, 0x57, 0x35, 0xb8,
+ 0xb0, 0xdb, 0xdf, 0x22, 0xbe, 0x4b, 0x42, 0x12, 0x34, 0x8c, 0x60, 0x67, 0xcb, 0x33, 0x7c, 0x8e,
+ 0xa2, 0xf2, 0xc2, 0x8d, 0x95, 0xd3, 0x73, 0xcf, 0xca, 0xad, 0x41, 0x74, 0xb5, 0xc7, 0x8f, 0x0e,
+ 0xab, 0x17, 0x32, 0x00, 0x38, 0x8b, 0x38, 0xda, 0x83, 0x19, 0xb7, 0x63, 0xbb, 0xfb, 0x4d, 0xb7,
+ 0xe3, 0x93, 0x20, 0x58, 0x2a, 0xb1, 0xce, 0xbc, 0x5e, 0xa4, 0x33, 0x1b, 0x0a, 0x9e, 0xda, 0xc2,
+ 0xd1, 0x61, 0x75, 0x46, 0x2d, 0xc1, 0x09, 0x3a, 0xfa, 0x2f, 0x68, 0x30, 0xbf, 0x6a, 0x75, 0xed,
+ 0x20, 0xb0, 0x3d, 0xb7, 0xe5, 0xf4, 0x3b, 0xb6, 0x8b, 0xae, 0xc2, 0xb8, 0x6b, 0x74, 0x09, 0x1b,
+ 0x90, 0xe9, 0xda, 0x8c, 0x18, 0xd3, 0xf1, 0x0d, 0xa3, 0x4b, 0x30, 0x83, 0xa0, 0x37, 0x60, 0xd2,
+ 0xf4, 0xdc, 0x6d, 0xbb, 0x23, 0xfa, 0xf9, 0x89, 0x15, 0x3e, 0x8f, 0x2b, 0xea, 0x3c, 0xb2, 0xee,
+ 0x89, 0xf9, 0x5f, 0xc1, 0xc6, 0xfd, 0xb5, 0xfd, 0x90, 0xb8, 0x94, 0x4c, 0x0d, 0x8e, 0x0e, 0xab,
+ 0x93, 0x75, 0x86, 0x00, 0x0b, 0x44, 0xfa, 0x75, 0x28, 0xaf, 0x3a, 0xc4, 0x0f, 0x6d, 0xb7, 0x83,
+ 0x5e, 0x86, 0x39, 0xd2, 0x35, 0x6c, 0x07, 0x13, 0x93, 0xd8, 0x7b, 0xc4, 0x0f, 0x96, 0xb4, 0xab,
+ 0x63, 0xcf, 0x4e, 0xd7, 0xd0, 0xd1, 0x61, 0x75, 0x6e, 0x2d, 0x01, 0xc1, 0xa9, 0x9a, 0xfa, 0x5f,
+ 0xd5, 0xa0, 0xb2, 0xda, 0xb7, 0xec, 0x90, 0xe3, 0x47, 0x3e, 0x54, 0x0c, 0xfa, 0xb3, 0xe5, 0x39,
+ 0xb6, 0x79, 0x20, 0x26, 0xf9, 0xb5, 0x22, 0xe3, 0xba, 0x1a, 0xa3, 0xa9, 0xcd, 0x1f, 0x1d, 0x56,
+ 0x2b, 0x4a, 0x01, 0x56, 0x89, 0xe8, 0x3b, 0xa0, 0xc2, 0xd0, 0x17, 0x61, 0x86, 0x7f, 0xe4, 0xba,
+ 0xd1, 0xc3, 0x64, 0x5b, 0xf4, 0xe1, 0xc3, 0xca, 0x98, 0x45, 0x84, 0x56, 0xee, 0x6c, 0xbd, 0x43,
+ 0xcc, 0x10, 0x93, 0x6d, 0xe2, 0x13, 0xd7, 0x24, 0x7c, 0xfa, 0xea, 0x4a, 0x63, 0x9c, 0x40, 0xa5,
+ 0xff, 0x58, 0x83, 0x85, 0xd5, 0x3d, 0xc3, 0x76, 0x8c, 0x2d, 0xdb, 0xb1, 0xc3, 0x83, 0xb7, 0x3c,
+ 0x97, 0x0c, 0x31, 0x7f, 0x9b, 0xf0, 0x78, 0xdf, 0x35, 0x78, 0x3b, 0x87, 0xac, 0xf3, 0x19, 0xbb,
+ 0x7b, 0xd0, 0x23, 0x94, 0xf1, 0xe8, 0x48, 0x3f, 0x79, 0x74, 0x58, 0x7d, 0x7c, 0x33, 0xbb, 0x0a,
+ 0xce, 0x6b, 0x8b, 0x30, 0x5c, 0x56, 0x40, 0xf7, 0x3c, 0xa7, 0xdf, 0x15, 0x58, 0xc7, 0x18, 0xd6,
+ 0xe5, 0xa3, 0xc3, 0xea, 0xe5, 0xcd, 0xcc, 0x1a, 0x38, 0xa7, 0xa5, 0xfe, 0xfd, 0x12, 0xcc, 0xd4,
+ 0x0c, 0x73, 0xb7, 0xdf, 0xab, 0xf5, 0xcd, 0x5d, 0x12, 0xa2, 0x2f, 0x41, 0x99, 0x8a, 0x2d, 0xcb,
+ 0x08, 0x0d, 0x31, 0x92, 0x3f, 0x93, 0xcb, 0x7d, 0x6c, 0x12, 0x69, 0xed, 0x78, 0x6c, 0xd7, 0x49,
+ 0x68, 0xd4, 0x90, 0x18, 0x13, 0x88, 0xcb, 0xb0, 0xc4, 0x8a, 0xb6, 0x61, 0x3c, 0xe8, 0x11, 0x53,
+ 0xf0, 0x76, 0xa3, 0x08, 0xaf, 0xa8, 0x3d, 0x6e, 0xf7, 0x88, 0x19, 0xcf, 0x02, 0xfd, 0x85, 0x19,
+ 0x7e, 0xe4, 0xc2, 0x64, 0x10, 0x1a, 0x61, 0x9f, 0x0e, 0x0f, 0xa5, 0x74, 0x7d, 0x64, 0x4a, 0x0c,
+ 0x5b, 0x6d, 0x4e, 0xd0, 0x9a, 0xe4, 0xbf, 0xb1, 0xa0, 0xa2, 0xff, 0x17, 0x0d, 0x16, 0xd4, 0xea,
+ 0xb7, 0xed, 0x20, 0x44, 0x7f, 0x71, 0x60, 0x38, 0x57, 0x86, 0x1b, 0x4e, 0xda, 0x9a, 0x0d, 0xe6,
+ 0x82, 0x20, 0x57, 0x8e, 0x4a, 0x94, 0xa1, 0x24, 0x30, 0x61, 0x87, 0xa4, 0xcb, 0xd9, 0xaa, 0xa0,
+ 0x3c, 0x53, 0xbb, 0x5c, 0x9b, 0x15, 0xc4, 0x26, 0x9a, 0x14, 0x2d, 0xe6, 0xd8, 0xf5, 0x2f, 0xc1,
+ 0x45, 0xb5, 0x56, 0xcb, 0xf7, 0xf6, 0x6c, 0x8b, 0xf8, 0x74, 0x25, 0x84, 0x07, 0xbd, 0x81, 0x95,
+ 0x40, 0x39, 0x0b, 0x33, 0x08, 0xfa, 0x28, 0x4c, 0xfa, 0xa4, 0x63, 0x7b, 0x2e, 0x9b, 0xed, 0xe9,
+ 0x78, 0xec, 0x30, 0x2b, 0xc5, 0x02, 0xaa, 0xff, 0xbf, 0x52, 0x72, 0xec, 0xe8, 0x34, 0xa2, 0x3d,
+ 0x28, 0xf7, 0x04, 0x29, 0x31, 0x76, 0x37, 0x47, 0xfd, 0xc0, 0xa8, 0xeb, 0xf1, 0xa8, 0x46, 0x25,
+ 0x58, 0xd2, 0x42, 0x36, 0xcc, 0x45, 0xff, 0xd7, 0x47, 0x10, 0xc3, 0x4c, 0x9c, 0xb6, 0x12, 0x88,
+ 0x70, 0x0a, 0x31, 0xba, 0x0b, 0xd3, 0x01, 0x31, 0x7d, 0x42, 0x65, 0x92, 0x60, 0xd3, 0x4c, 0xc1,
+ 0xd5, 0x8e, 0x2a, 0x09, 0xc1, 0xb5, 0x28, 0xba, 0x3f, 0x2d, 0x01, 0x38, 0x46, 0x84, 0x9e, 0x85,
+ 0x72, 0x40, 0x88, 0x45, 0x25, 0xd2, 0xd2, 0x38, 0x9f, 0x1b, 0xfa, 0xa9, 0x6d, 0x51, 0x86, 0x25,
+ 0x54, 0xff, 0xf6, 0x38, 0xa0, 0x41, 0x16, 0x57, 0x47, 0x80, 0x97, 0x88, 0xf1, 0x1f, 0x65, 0x04,
+ 0xc4, 0x6a, 0x49, 0x21, 0x46, 0x0f, 0x60, 0xd6, 0x31, 0x82, 0xf0, 0x4e, 0x8f, 0xea, 0x20, 0x11,
+ 0xa3, 0x54, 0x5e, 0x58, 0x2d, 0x32, 0xd3, 0xb7, 0x55, 0x44, 0xb5, 0xc5, 0xa3, 0xc3, 0xea, 0x6c,
+ 0xa2, 0x08, 0x27, 0x49, 0xa1, 0x77, 0x60, 0x9a, 0x16, 0xac, 0xf9, 0xbe, 0xe7, 0x8b, 0xd1, 0x7f,
+ 0xb5, 0x28, 0x5d, 0x86, 0xa4, 0x36, 0x4b, 0xe7, 0x44, 0xfe, 0xc4, 0x31, 0x7a, 0xf4, 0x79, 0x40,
+ 0xde, 0x56, 0x40, 0xfc, 0x3d, 0x62, 0xdd, 0xe0, 0x0a, 0x17, 0xfd, 0x58, 0x3a, 0x3b, 0x63, 0xb5,
+ 0x65, 0x31, 0x9b, 0xe8, 0xce, 0x40, 0x0d, 0x9c, 0xd1, 0x0a, 0xed, 0x02, 0x92, 0x4a, 0x9b, 0x64,
+ 0x80, 0xa5, 0x89, 0xe1, 0xd9, 0xe7, 0x32, 0x25, 0x76, 0x63, 0x00, 0x05, 0xce, 0x40, 0xab, 0xff,
+ 0xfb, 0x12, 0x54, 0x38, 0x8b, 0xac, 0xb9, 0xa1, 0x7f, 0xf0, 0x08, 0x36, 0x08, 0x92, 0xd8, 0x20,
+ 0xea, 0xc5, 0xd7, 0x3c, 0xeb, 0x70, 0xee, 0xfe, 0xd0, 0x4d, 0xed, 0x0f, 0x6b, 0xa3, 0x12, 0x3a,
+ 0x7e, 0x7b, 0xf8, 0xa1, 0x06, 0xf3, 0x4a, 0xed, 0x47, 0xb0, 0x3b, 0x58, 0xc9, 0xdd, 0xe1, 0xb5,
+ 0x11, 0xbf, 0x2f, 0x67, 0x73, 0xf0, 0x12, 0x9f, 0xc5, 0x04, 0xf7, 0x0b, 0x00, 0x5b, 0x4c, 0x9c,
+ 0x6c, 0xc4, 0x7a, 0x92, 0x9c, 0xf2, 0x9a, 0x84, 0x60, 0xa5, 0x56, 0x42, 0x66, 0x95, 0x8e, 0x95,
+ 0x59, 0xdf, 0x2b, 0xc1, 0xe2, 0xc0, 0xb0, 0x0f, 0xca, 0x11, 0xed, 0x3d, 0x92, 0x23, 0xa5, 0xf7,
+ 0x42, 0x8e, 0x8c, 0x15, 0x91, 0x23, 0xfa, 0x1f, 0x68, 0x30, 0x56, 0xc7, 0x4d, 0xf4, 0xb1, 0x84,
+ 0x46, 0xfb, 0xb8, 0xaa, 0xd1, 0x3e, 0x3c, 0xac, 0x4e, 0xd5, 0x71, 0x53, 0x51, 0x6e, 0xff, 0xb6,
+ 0x06, 0x8b, 0xa6, 0xe7, 0x86, 0x06, 0x65, 0x4d, 0xcc, 0xc5, 0x7e, 0xc4, 0x62, 0x85, 0x94, 0xb9,
+ 0x7a, 0x0a, 0x59, 0xed, 0x09, 0xd1, 0x81, 0xc5, 0x34, 0x24, 0xc0, 0x83, 0x94, 0xf5, 0x4d, 0x98,
+ 0xae, 0x3b, 0x5e, 0xdf, 0x6a, 0xba, 0xdb, 0xde, 0x19, 0x6a, 0x24, 0x3f, 0xd2, 0x60, 0x86, 0xe1,
+ 0x6d, 0xf9, 0xde, 0xb6, 0xed, 0x90, 0x0f, 0x86, 0x62, 0xac, 0xf6, 0x38, 0x4f, 0xf0, 0x31, 0x45,
+ 0x55, 0xad, 0xf8, 0x01, 0x51, 0x54, 0xd5, 0x2e, 0xe7, 0xc8, 0xa2, 0x6f, 0x4c, 0x25, 0xbf, 0x8c,
+ 0x49, 0xa3, 0x67, 0xa1, 0x6c, 0x1a, 0xb5, 0xbe, 0x6b, 0x39, 0x92, 0x2f, 0x68, 0x2f, 0xeb, 0xab,
+ 0xbc, 0x0c, 0x4b, 0x28, 0x7a, 0x00, 0x10, 0x3b, 0x0f, 0xc4, 0x34, 0x5c, 0x1f, 0xcd, 0x61, 0xd1,
+ 0x26, 0x21, 0xb5, 0xb9, 0x83, 0x78, 0xea, 0x63, 0x18, 0x56, 0xa8, 0xa1, 0xbf, 0x02, 0xb3, 0x62,
+ 0x90, 0x9b, 0x5d, 0xa3, 0x23, 0x6c, 0xba, 0x82, 0x23, 0xb5, 0xae, 0x20, 0xaa, 0x5d, 0x12, 0x84,
+ 0x67, 0xd5, 0xd2, 0x00, 0x27, 0xa9, 0xa1, 0x03, 0x98, 0xe9, 0xaa, 0x76, 0xea, 0x78, 0xf1, 0x2d,
+ 0x43, 0xb1, 0x59, 0x6b, 0x17, 0x05, 0xf1, 0x99, 0x84, 0x85, 0x9b, 0x20, 0x95, 0xa1, 0x6e, 0x4f,
+ 0x9c, 0x97, 0xba, 0x4d, 0x60, 0x8a, 0x2f, 0xef, 0x60, 0x69, 0x92, 0x7d, 0xe0, 0xcb, 0x45, 0x3e,
+ 0x90, 0x4b, 0x8a, 0xd8, 0x1b, 0xc6, 0x7f, 0x07, 0x38, 0xc2, 0x8d, 0xf6, 0x60, 0x86, 0xee, 0x56,
+ 0x6d, 0xe2, 0x10, 0x33, 0xf4, 0xfc, 0xa5, 0xa9, 0xe2, 0xde, 0xa6, 0xb6, 0x82, 0x87, 0xbb, 0x2b,
+ 0xd4, 0x12, 0x9c, 0xa0, 0x23, 0xa5, 0x5f, 0x39, 0x57, 0xfa, 0xf5, 0xa1, 0xb2, 0xa7, 0xf8, 0x0d,
+ 0xa6, 0xd9, 0x20, 0x7c, 0xae, 0x48, 0xc7, 0x62, 0x27, 0x42, 0xed, 0x82, 0x20, 0x54, 0x51, 0x1d,
+ 0x0e, 0x2a, 0x1d, 0xfd, 0x17, 0x27, 0x61, 0xb1, 0xee, 0xf4, 0x83, 0x90, 0xf8, 0xab, 0xc2, 0x9d,
+ 0x4b, 0x7c, 0xf4, 0x35, 0x0d, 0x2e, 0xb3, 0x7f, 0x1b, 0xde, 0x7d, 0xb7, 0x41, 0x1c, 0xe3, 0x60,
+ 0x75, 0x9b, 0xd6, 0xb0, 0xac, 0xd3, 0x49, 0xa0, 0x46, 0x5f, 0xec, 0xd4, 0xcc, 0x01, 0xd2, 0xce,
+ 0xc4, 0x88, 0x73, 0x28, 0xa1, 0xbf, 0xa5, 0xc1, 0x13, 0x19, 0xa0, 0x06, 0x71, 0x48, 0x48, 0x84,
+ 0x0c, 0x38, 0x6d, 0x3f, 0x9e, 0x3e, 0x3a, 0xac, 0x3e, 0xd1, 0xce, 0x43, 0x8a, 0xf3, 0xe9, 0xa1,
+ 0x5f, 0xd6, 0x60, 0x39, 0x03, 0x7a, 0xdd, 0xb0, 0x9d, 0xbe, 0x4f, 0x84, 0xa2, 0x7a, 0xda, 0xee,
+ 0x5c, 0x39, 0x3a, 0xac, 0x2e, 0xb7, 0x73, 0xb1, 0xe2, 0x63, 0x28, 0xa2, 0xaf, 0xc0, 0x25, 0x09,
+ 0xdd, 0x74, 0x5d, 0x42, 0x2c, 0x62, 0xdd, 0xb5, 0x85, 0x5d, 0x79, 0xfa, 0xae, 0x3c, 0x71, 0x74,
+ 0x58, 0xbd, 0xd4, 0xce, 0x42, 0x88, 0xb3, 0xe9, 0xa0, 0x0e, 0x3c, 0x1d, 0x03, 0x42, 0xdb, 0xb1,
+ 0x1f, 0x30, 0x4c, 0x77, 0x77, 0x7c, 0x12, 0xec, 0x78, 0x8e, 0xc5, 0x84, 0x85, 0x56, 0xfb, 0xd0,
+ 0xd1, 0x61, 0xf5, 0xe9, 0xf6, 0x71, 0x15, 0xf1, 0xf1, 0x78, 0x90, 0x05, 0x33, 0x81, 0x69, 0xb8,
+ 0x4d, 0x37, 0x24, 0xfe, 0x9e, 0xe1, 0x2c, 0x4d, 0x16, 0xfa, 0x40, 0xbe, 0x44, 0x15, 0x3c, 0x38,
+ 0x81, 0x55, 0xff, 0xdf, 0x1a, 0x54, 0xc4, 0x4a, 0x60, 0x0a, 0xcb, 0x16, 0x4c, 0x98, 0x74, 0xc3,
+ 0x12, 0x1c, 0xff, 0x6a, 0xe1, 0x8d, 0x91, 0x62, 0x8b, 0x77, 0x45, 0x56, 0x84, 0x39, 0x6a, 0xb4,
+ 0x97, 0xb1, 0xad, 0xd5, 0x46, 0xdb, 0xd6, 0x18, 0xb5, 0x13, 0xb6, 0x34, 0xfd, 0x70, 0x0c, 0xa6,
+ 0xeb, 0x9e, 0x6b, 0xd9, 0x4c, 0x49, 0x7e, 0x3e, 0xa1, 0x9a, 0x3d, 0xad, 0x0a, 0xa7, 0x87, 0x87,
+ 0xd5, 0x59, 0x59, 0x51, 0x91, 0x56, 0x2f, 0x49, 0x0b, 0x8d, 0xeb, 0x6a, 0x1f, 0x4a, 0x9a, 0x56,
+ 0x0f, 0x0f, 0xab, 0xf3, 0xb2, 0x59, 0xd2, 0xda, 0x42, 0x7b, 0x80, 0xa8, 0xce, 0x7c, 0xd7, 0x37,
+ 0xdc, 0x80, 0xa3, 0xa5, 0x4c, 0xcb, 0xd7, 0xcf, 0x5f, 0x18, 0x6e, 0x4e, 0x69, 0x8b, 0x58, 0xa5,
+ 0xbe, 0x3d, 0x80, 0x0d, 0x67, 0x50, 0x40, 0xef, 0xc0, 0x1c, 0x2d, 0xdd, 0xec, 0x59, 0x46, 0x48,
+ 0x94, 0x85, 0x72, 0x1a, 0x9a, 0x97, 0x05, 0xcd, 0xb9, 0xdb, 0x09, 0x4c, 0x38, 0x85, 0x99, 0xab,
+ 0xb2, 0x46, 0xe0, 0xb9, 0x6c, 0x0d, 0x24, 0x54, 0x59, 0x5a, 0x8a, 0x05, 0x14, 0x3d, 0x07, 0x53,
+ 0x5d, 0x12, 0x04, 0x46, 0x87, 0x30, 0xa6, 0x9e, 0x8e, 0x77, 0xae, 0x75, 0x5e, 0x8c, 0x23, 0x38,
+ 0xfa, 0x38, 0x4c, 0x98, 0x9e, 0x45, 0x82, 0xa5, 0x29, 0xe6, 0x51, 0xbe, 0xcc, 0x78, 0x89, 0x16,
+ 0x3c, 0x3c, 0xac, 0x4e, 0x33, 0x03, 0x84, 0xfe, 0xc2, 0xbc, 0x92, 0xfe, 0x1b, 0x54, 0x91, 0x4c,
+ 0x29, 0xe4, 0x43, 0xa8, 0xe0, 0x8f, 0xce, 0xbf, 0xa6, 0xff, 0xa0, 0x04, 0x17, 0x69, 0x0f, 0x7d,
+ 0xcf, 0x71, 0xa8, 0x90, 0xed, 0x39, 0xde, 0x41, 0x97, 0xb8, 0xe1, 0xfb, 0xaa, 0x97, 0xe8, 0x75,
+ 0x98, 0xec, 0xf1, 0xf3, 0x93, 0x31, 0xd6, 0x9d, 0x67, 0xe9, 0x24, 0xf2, 0xd3, 0x8d, 0x87, 0x87,
+ 0xd5, 0xe5, 0xac, 0x0f, 0x10, 0xe7, 0x22, 0xa2, 0x1d, 0xb2, 0x53, 0x1a, 0x07, 0x67, 0xba, 0x4f,
+ 0x0e, 0xa9, 0xc1, 0x1b, 0x5b, 0xc4, 0x19, 0x56, 0xc9, 0xd0, 0x7f, 0x5a, 0x82, 0xcb, 0x71, 0x8f,
+ 0x9a, 0x6e, 0x10, 0x1a, 0x8e, 0xc3, 0xed, 0xe0, 0xf3, 0x37, 0x91, 0x7a, 0x09, 0x13, 0x69, 0xa3,
+ 0xa8, 0xb9, 0x39, 0xd8, 0xf7, 0x5c, 0x2f, 0xd1, 0x7e, 0xca, 0x4b, 0xd4, 0x3a, 0x43, 0x9a, 0xc7,
+ 0x3b, 0x8c, 0xfe, 0x8f, 0x06, 0xcb, 0xd9, 0x0d, 0x1f, 0x81, 0xc1, 0xe6, 0x25, 0x0d, 0xb6, 0xcf,
+ 0x9f, 0xdd, 0x57, 0xe7, 0x98, 0x6e, 0xff, 0x39, 0xf7, 0x6b, 0x99, 0x11, 0xb7, 0x0d, 0xf3, 0x54,
+ 0xbb, 0x0e, 0x42, 0xe1, 0xce, 0x38, 0xdd, 0x39, 0x5f, 0xe4, 0xd2, 0x98, 0xc7, 0x49, 0x1c, 0x38,
+ 0x8d, 0x14, 0x6d, 0xc0, 0x14, 0xe5, 0x76, 0x8a, 0xbf, 0x34, 0x3c, 0x7e, 0x29, 0x50, 0xdb, 0xbc,
+ 0x2d, 0x8e, 0x90, 0xe8, 0x7f, 0xa6, 0xc1, 0x53, 0xc7, 0xcd, 0x3e, 0x7a, 0x17, 0xc0, 0x8c, 0xf6,
+ 0x30, 0x7e, 0x10, 0x5b, 0x54, 0x0b, 0x88, 0xb0, 0xc4, 0x4b, 0x48, 0x16, 0x05, 0x58, 0x21, 0x92,
+ 0xe1, 0xdd, 0x2f, 0x9d, 0x93, 0x77, 0x5f, 0xff, 0xbf, 0x9a, 0x2a, 0x2c, 0xd4, 0xd1, 0xff, 0xa0,
+ 0x09, 0x0b, 0xb5, 0xef, 0xb9, 0x9e, 0x95, 0xe4, 0x92, 0x55, 0x9b, 0x7c, 0xe0, 0x96, 0xac, 0xda,
+ 0xf9, 0x9c, 0x25, 0xfb, 0x77, 0x4a, 0x79, 0x5f, 0xcb, 0x96, 0xec, 0x7d, 0x98, 0x8e, 0xe2, 0x66,
+ 0x22, 0xc6, 0xbe, 0x3e, 0x6a, 0x9f, 0x38, 0xba, 0xf8, 0xf8, 0x2b, 0x2a, 0x09, 0x70, 0x4c, 0x0b,
+ 0xed, 0x03, 0x58, 0x72, 0xa3, 0x14, 0xb3, 0x7f, 0x73, 0x34, 0xca, 0xf1, 0xc6, 0x5b, 0x9b, 0xa3,
+ 0xfc, 0x16, 0xff, 0xc6, 0x0a, 0x2d, 0xfd, 0x77, 0x4b, 0x80, 0x06, 0xbb, 0x4b, 0x95, 0x8d, 0x5d,
+ 0xdb, 0xb5, 0xd2, 0xca, 0xc6, 0x2d, 0xdb, 0xb5, 0x30, 0x83, 0x48, 0x75, 0xa4, 0x94, 0xab, 0x8e,
+ 0xbc, 0x0a, 0xf3, 0x1d, 0xc7, 0xdb, 0x32, 0x1c, 0xe7, 0x40, 0x44, 0xdf, 0xb0, 0x0d, 0xa9, 0x5c,
+ 0xbb, 0x40, 0xe5, 0xda, 0x8d, 0x24, 0x08, 0xa7, 0xeb, 0xa2, 0x1e, 0x2c, 0xf8, 0xc4, 0xf4, 0x5c,
+ 0xd3, 0x76, 0x98, 0xf2, 0xe8, 0xf5, 0xc3, 0x82, 0x26, 0xdc, 0xc5, 0xa3, 0xc3, 0xea, 0x02, 0x4e,
+ 0xe1, 0xc2, 0x03, 0xd8, 0xd1, 0x33, 0x30, 0xd5, 0xf3, 0xed, 0xae, 0xe1, 0x1f, 0x30, 0xf5, 0xb4,
+ 0x5c, 0xab, 0x50, 0x01, 0xd9, 0xe2, 0x45, 0x38, 0x82, 0xe9, 0xdf, 0xd2, 0x60, 0xac, 0xb1, 0xd1,
+ 0x46, 0x3a, 0x4c, 0x5a, 0x5e, 0xd7, 0xb0, 0x5d, 0x31, 0x4a, 0x2c, 0x88, 0xa5, 0xc1, 0x4a, 0xb0,
+ 0x80, 0xa0, 0x1e, 0x4c, 0x47, 0xf2, 0x65, 0xa4, 0x43, 0x8d, 0xc6, 0x46, 0x5b, 0x1e, 0x04, 0x4b,
+ 0x56, 0x8a, 0x4a, 0x02, 0x1c, 0x13, 0xd1, 0x0d, 0x58, 0x6c, 0x6c, 0xb4, 0x9b, 0xae, 0xe9, 0xf4,
+ 0x2d, 0xb2, 0xb6, 0xcf, 0xfe, 0xd0, 0x2f, 0xb3, 0x79, 0x89, 0x08, 0x9c, 0x61, 0x5f, 0x26, 0x2a,
+ 0xe1, 0x08, 0x46, 0xab, 0x11, 0xde, 0x42, 0x44, 0x7d, 0xb0, 0x6a, 0x02, 0x09, 0x8e, 0x60, 0xfa,
+ 0x8f, 0x4a, 0x50, 0x51, 0x3a, 0x84, 0x1c, 0x98, 0xe2, 0x9f, 0x1b, 0x1d, 0xba, 0xae, 0x15, 0xfc,
+ 0xc4, 0x64, 0xaf, 0x39, 0x75, 0x3e, 0xa0, 0x01, 0x8e, 0x48, 0xa8, 0xb3, 0x54, 0xca, 0x9f, 0x25,
+ 0xb4, 0x02, 0xc0, 0x8f, 0x97, 0xd9, 0xf9, 0x0c, 0xd7, 0x52, 0xd9, 0x42, 0x68, 0xcb, 0x52, 0xac,
+ 0xd4, 0x40, 0x4f, 0x09, 0x7e, 0xe6, 0xa7, 0xcf, 0xe5, 0x14, 0x2f, 0x6f, 0xc3, 0xc4, 0x03, 0xcf,
+ 0x25, 0x81, 0x70, 0xf4, 0x9d, 0xd1, 0x07, 0x4e, 0x53, 0x01, 0xf5, 0x16, 0xc5, 0x8b, 0x39, 0x7a,
+ 0xfd, 0x37, 0x35, 0x80, 0x86, 0x11, 0x1a, 0xdc, 0x2f, 0x35, 0x44, 0xe0, 0xce, 0x53, 0x89, 0x65,
+ 0x58, 0x1e, 0x38, 0x3a, 0x18, 0x0f, 0xec, 0x07, 0xd1, 0xe7, 0xcb, 0xbd, 0x87, 0x63, 0x6f, 0xdb,
+ 0x0f, 0x08, 0x66, 0x70, 0xf4, 0x31, 0x98, 0x26, 0xae, 0xe9, 0x1f, 0xf4, 0x42, 0x62, 0xb1, 0x11,
+ 0x28, 0xf3, 0xf3, 0x9c, 0xb5, 0xa8, 0x10, 0xc7, 0x70, 0x7d, 0x0f, 0xca, 0x6b, 0xae, 0xd5, 0xf3,
+ 0x6c, 0x6e, 0x94, 0x9c, 0xd0, 0xc1, 0xa7, 0x61, 0xac, 0xef, 0x3b, 0xa2, 0x7f, 0x15, 0x51, 0x61,
+ 0x6c, 0x13, 0xdf, 0xc6, 0xb4, 0x9c, 0x5a, 0x7a, 0xbd, 0xbe, 0xdf, 0xf3, 0x82, 0xa8, 0x93, 0x52,
+ 0x31, 0x69, 0xf1, 0x62, 0x1c, 0xc1, 0xf5, 0x87, 0x1a, 0x2c, 0xac, 0xed, 0xf7, 0x6c, 0x9f, 0x45,
+ 0x04, 0x11, 0x9f, 0x6e, 0xe9, 0xb4, 0xfd, 0x1e, 0xff, 0x57, 0xf4, 0x41, 0xb6, 0x17, 0x35, 0x70,
+ 0x04, 0x47, 0xdb, 0x30, 0x47, 0x58, 0x73, 0x2a, 0x14, 0x1a, 0x86, 0xf4, 0x95, 0x9d, 0xc6, 0xd0,
+ 0xe5, 0x01, 0x67, 0x09, 0x2c, 0x38, 0x85, 0x15, 0xb5, 0x61, 0xce, 0x74, 0x8c, 0x20, 0xb0, 0xb7,
+ 0x6d, 0x33, 0x3e, 0xeb, 0x9a, 0xae, 0x7d, 0x8c, 0xb6, 0xad, 0x27, 0x20, 0x0f, 0x0f, 0xab, 0x97,
+ 0x44, 0x3f, 0x93, 0x00, 0x9c, 0x42, 0xa1, 0xff, 0x81, 0x06, 0xd3, 0x52, 0x91, 0x79, 0x7f, 0xd9,
+ 0x82, 0xcf, 0x42, 0xd9, 0xb2, 0x03, 0x55, 0xc0, 0xb3, 0xd3, 0x8a, 0x86, 0x28, 0xc3, 0x12, 0xaa,
+ 0x07, 0x50, 0xbe, 0x21, 0xd6, 0x08, 0x5a, 0x86, 0x92, 0x1d, 0xed, 0x2f, 0x20, 0x3e, 0xa0, 0xd4,
+ 0x6c, 0xe0, 0x92, 0x6d, 0x49, 0xae, 0x2a, 0xe5, 0x72, 0x95, 0x32, 0xed, 0x63, 0xc7, 0x4f, 0xbb,
+ 0xfe, 0x1d, 0x0d, 0x2a, 0x37, 0xed, 0x2d, 0xe2, 0xbb, 0x5c, 0x8b, 0x7b, 0x26, 0x1d, 0x23, 0x5a,
+ 0xc9, 0x8a, 0x0f, 0x45, 0xfb, 0x30, 0x1d, 0x98, 0x3b, 0xc4, 0xea, 0x3b, 0xf2, 0xac, 0xb0, 0x50,
+ 0x24, 0xa8, 0x42, 0xba, 0x2d, 0xf0, 0x29, 0xb1, 0x30, 0x11, 0x05, 0x1c, 0x13, 0xd3, 0xbf, 0x0c,
+ 0x17, 0x32, 0x1a, 0xa1, 0x2a, 0x4c, 0x04, 0xa1, 0xe1, 0x87, 0x62, 0xcc, 0x98, 0xec, 0x68, 0xd3,
+ 0x02, 0xcc, 0xcb, 0xd1, 0x13, 0x30, 0x46, 0x5c, 0x4b, 0x0c, 0xda, 0x14, 0x5d, 0x65, 0x6b, 0xae,
+ 0x85, 0x69, 0x19, 0x9d, 0x22, 0xc7, 0x4b, 0x30, 0x23, 0x9b, 0xa2, 0xdb, 0xa2, 0x0c, 0x4b, 0xa8,
+ 0xfe, 0x4b, 0x93, 0xf0, 0xf4, 0x4d, 0xcf, 0xb7, 0x1f, 0x78, 0x6e, 0x68, 0x38, 0x2d, 0xcf, 0x8a,
+ 0xbd, 0xdf, 0x62, 0xba, 0xbf, 0xae, 0xc1, 0xe3, 0x66, 0xaf, 0xdf, 0x74, 0xed, 0xd0, 0x36, 0x22,
+ 0xa7, 0x64, 0x8b, 0xf8, 0xb6, 0x57, 0xd4, 0x09, 0xce, 0x62, 0x0b, 0xeb, 0xad, 0xcd, 0x2c, 0x94,
+ 0x38, 0x8f, 0x16, 0x7a, 0x07, 0xe6, 0x2c, 0xef, 0xbe, 0xcb, 0x5d, 0xa4, 0xc4, 0x31, 0x0e, 0x0a,
+ 0xba, 0xbe, 0x19, 0x8b, 0x37, 0x12, 0x98, 0x70, 0x0a, 0x33, 0xf3, 0xfb, 0xcb, 0xa2, 0x76, 0xc8,
+ 0x62, 0x2b, 0x1f, 0xc4, 0xc3, 0x59, 0xd0, 0xef, 0xdf, 0xc8, 0xc4, 0x88, 0x73, 0x28, 0xa1, 0xaf,
+ 0xc0, 0x25, 0x9b, 0x0f, 0x04, 0x26, 0x86, 0x65, 0xbb, 0x24, 0x08, 0xf8, 0x77, 0x8f, 0xe0, 0xd8,
+ 0x6e, 0x66, 0x21, 0xc4, 0xd9, 0x74, 0xd0, 0xdb, 0x00, 0xc1, 0x81, 0x6b, 0x8a, 0xb9, 0x9e, 0x28,
+ 0x44, 0x95, 0x6f, 0xc1, 0x12, 0x0b, 0x56, 0x30, 0xd2, 0x5d, 0x28, 0xf4, 0x1c, 0xe2, 0x1b, 0xae,
+ 0xc9, 0xfd, 0x7e, 0x1a, 0xdf, 0x85, 0xee, 0x46, 0x85, 0x38, 0x86, 0x23, 0x0b, 0x66, 0xfa, 0x3d,
+ 0x65, 0xf2, 0xa7, 0x8a, 0x3b, 0xbf, 0x37, 0x15, 0x3c, 0x38, 0x81, 0x55, 0xff, 0x67, 0x1a, 0x4c,
+ 0x89, 0xc8, 0x68, 0xf4, 0xd1, 0x94, 0xbe, 0x27, 0xbd, 0x20, 0x29, 0x9d, 0xef, 0x80, 0xd9, 0xc7,
+ 0x42, 0xa3, 0x16, 0x4c, 0x59, 0x48, 0x61, 0x10, 0x84, 0x63, 0xf5, 0x3c, 0x61, 0x27, 0x47, 0x2a,
+ 0xbb, 0x42, 0x4c, 0xff, 0xb6, 0x06, 0x8b, 0x03, 0xad, 0x86, 0x50, 0xe6, 0x1f, 0xa1, 0x7f, 0xf3,
+ 0xd7, 0x00, 0x58, 0x10, 0xfc, 0x6a, 0xab, 0xd9, 0x26, 0xfe, 0x9e, 0x14, 0x2b, 0x7f, 0x53, 0x83,
+ 0x85, 0xd8, 0x13, 0x2f, 0x7a, 0xa1, 0x15, 0x8f, 0x2b, 0xb8, 0x95, 0xc2, 0x55, 0x5b, 0x12, 0xdf,
+ 0xbd, 0x90, 0x86, 0xe0, 0x01, 0xba, 0xe8, 0x6f, 0x68, 0xb0, 0x60, 0x24, 0x83, 0xe0, 0xa3, 0x4d,
+ 0xa0, 0x50, 0x70, 0x57, 0x2a, 0xa0, 0x3e, 0xee, 0x4b, 0x0a, 0x10, 0xe0, 0x01, 0xb2, 0xe8, 0x53,
+ 0x30, 0x63, 0xf4, 0xec, 0xd5, 0xbe, 0x65, 0x13, 0xd7, 0x94, 0x91, 0xd3, 0x8c, 0x71, 0x57, 0x5b,
+ 0x4d, 0x59, 0x8e, 0x13, 0xb5, 0x64, 0x94, 0xbb, 0x18, 0xc8, 0xf1, 0x11, 0xa3, 0xdc, 0xc5, 0x18,
+ 0xc6, 0x51, 0xee, 0x62, 0xe8, 0x54, 0x22, 0xe8, 0xe7, 0xe0, 0x09, 0xbe, 0x7b, 0xd6, 0x8c, 0xc0,
+ 0x36, 0x57, 0xfb, 0xe1, 0x0e, 0x71, 0xc3, 0x48, 0x07, 0xe2, 0x16, 0x15, 0x3b, 0x67, 0x5c, 0xcb,
+ 0xab, 0x84, 0xf3, 0xdb, 0x23, 0x17, 0xc0, 0xb3, 0x2d, 0x53, 0x7c, 0x0f, 0x3f, 0xea, 0x2a, 0x74,
+ 0x0c, 0x7c, 0xa7, 0xd9, 0xa8, 0x8b, 0xcf, 0x61, 0xc2, 0x28, 0xfe, 0x8d, 0x15, 0x0a, 0xe8, 0x9b,
+ 0x1a, 0xcc, 0x0a, 0x26, 0x17, 0x34, 0xa7, 0xd8, 0xfc, 0xbf, 0x55, 0x94, 0x19, 0x53, 0x0c, 0xbf,
+ 0x82, 0x55, 0xe4, 0x3c, 0x5c, 0x4d, 0x06, 0x3e, 0x24, 0x60, 0x38, 0xd9, 0x0f, 0xf4, 0xf7, 0x34,
+ 0xb8, 0x18, 0x10, 0x7f, 0xcf, 0x36, 0xc9, 0xaa, 0x69, 0x7a, 0x7d, 0x37, 0x9a, 0xe4, 0x72, 0x71,
+ 0xbf, 0x41, 0x3b, 0x03, 0x5f, 0x6d, 0xe9, 0xe8, 0xb0, 0x7a, 0x31, 0x0b, 0x82, 0x33, 0xe9, 0xd3,
+ 0x5d, 0x72, 0xfe, 0xbe, 0x11, 0x9a, 0x3b, 0x75, 0xc3, 0xdc, 0x61, 0xe6, 0x45, 0xb0, 0x34, 0x5d,
+ 0x3c, 0x22, 0xf2, 0xcd, 0x24, 0x2a, 0xee, 0x36, 0x48, 0x15, 0xe2, 0x34, 0x41, 0x14, 0x40, 0xd9,
+ 0x27, 0xef, 0xf6, 0x49, 0x10, 0x06, 0x4b, 0xc0, 0x88, 0x37, 0x47, 0x9e, 0x31, 0x2c, 0x10, 0x72,
+ 0xad, 0x29, 0xfa, 0x85, 0x25, 0xa1, 0xe5, 0xd7, 0x01, 0x0d, 0x4e, 0x27, 0x5a, 0x80, 0xb1, 0x5d,
+ 0xc2, 0x6f, 0x98, 0x4c, 0x63, 0xfa, 0x2f, 0xba, 0x08, 0x13, 0x7b, 0x86, 0xd3, 0xe7, 0x9a, 0x6d,
+ 0x19, 0xf3, 0x1f, 0x2f, 0x97, 0x3e, 0xa3, 0xe9, 0xbf, 0xa7, 0xc1, 0xa5, 0x4c, 0x9a, 0x08, 0xc3,
+ 0xe5, 0xae, 0xb1, 0xbf, 0xe1, 0xb9, 0xeb, 0xfd, 0xd0, 0x08, 0x6d, 0xb7, 0xd3, 0x74, 0xb7, 0x1d,
+ 0xbb, 0xb3, 0xc3, 0x15, 0xc1, 0x09, 0xae, 0x4a, 0xac, 0x67, 0xd6, 0xc0, 0x39, 0x2d, 0x51, 0x13,
+ 0x2e, 0x74, 0x8d, 0xfd, 0x01, 0x84, 0x25, 0x86, 0x90, 0xdd, 0x53, 0x5a, 0x1f, 0x04, 0xe3, 0xac,
+ 0x36, 0xfa, 0x37, 0xc6, 0xe1, 0x49, 0xda, 0xf1, 0x78, 0xbf, 0x59, 0x37, 0x5c, 0xa3, 0xf3, 0xfe,
+ 0x94, 0xeb, 0xdf, 0xd1, 0xe0, 0xf1, 0x9d, 0x6c, 0xed, 0x56, 0xec, 0x78, 0x6f, 0x14, 0xd2, 0xf1,
+ 0x8f, 0x53, 0x98, 0xf9, 0x41, 0xff, 0xb1, 0x55, 0x70, 0x5e, 0xa7, 0xd0, 0xeb, 0xb0, 0xe0, 0x7a,
+ 0x16, 0xa9, 0x37, 0x1b, 0x78, 0xdd, 0x08, 0x76, 0xdb, 0x91, 0x31, 0x3f, 0xc1, 0x9d, 0x5a, 0x1b,
+ 0x29, 0x18, 0x1e, 0xa8, 0x8d, 0xf6, 0x00, 0xf5, 0x3c, 0x6b, 0x6d, 0xcf, 0x36, 0xa3, 0x13, 0xdf,
+ 0xe2, 0x8e, 0x34, 0x16, 0x84, 0xdd, 0x1a, 0xc0, 0x86, 0x33, 0x28, 0xe8, 0x7f, 0xa4, 0xc1, 0x3c,
+ 0x9d, 0x91, 0x96, 0xef, 0xed, 0x1f, 0xbc, 0x1f, 0x79, 0xe1, 0x39, 0x18, 0xef, 0x7a, 0x56, 0x64,
+ 0x64, 0x5e, 0xa2, 0x1a, 0xd1, 0xba, 0x67, 0x91, 0x87, 0xdc, 0xb1, 0xb6, 0x7f, 0x40, 0x7f, 0x60,
+ 0x56, 0x45, 0xff, 0x6f, 0x1a, 0xd7, 0x59, 0x22, 0x5b, 0xec, 0x7d, 0xc9, 0xdb, 0x2f, 0xc2, 0x2c,
+ 0x2d, 0x5b, 0x37, 0xf6, 0x5b, 0x8d, 0x7b, 0x9e, 0x13, 0x45, 0x20, 0xb0, 0x58, 0xe0, 0x5b, 0x2a,
+ 0x00, 0x27, 0xeb, 0xe9, 0xdf, 0x9c, 0x05, 0x56, 0xc1, 0x21, 0xe1, 0xfb, 0xf1, 0xbb, 0x9e, 0x87,
+ 0x8a, 0xd9, 0xeb, 0xd7, 0xaf, 0xb7, 0xdf, 0xe8, 0x7b, 0xa1, 0x21, 0x7c, 0x7e, 0x4c, 0x11, 0xa9,
+ 0xb7, 0x36, 0xa3, 0x62, 0xac, 0xd6, 0xa1, 0xab, 0xc6, 0xec, 0xf5, 0x85, 0x1c, 0x6a, 0xa9, 0xe7,
+ 0xd4, 0x6c, 0xd5, 0xd4, 0x5b, 0x9b, 0x09, 0x18, 0x1e, 0xa8, 0x8d, 0xbe, 0x02, 0x33, 0x44, 0x30,
+ 0xf4, 0x4d, 0xc3, 0xb7, 0xc4, 0x7a, 0x29, 0xbc, 0x93, 0xc8, 0xa1, 0x8d, 0x56, 0x09, 0xd7, 0xdf,
+ 0xd6, 0x14, 0x12, 0x38, 0x41, 0x90, 0xe9, 0x52, 0xe2, 0x37, 0x9d, 0x29, 0xcf, 0xba, 0xe1, 0x1b,
+ 0x26, 0x51, 0x4c, 0xaf, 0x09, 0xa1, 0x4b, 0xe5, 0x55, 0xc2, 0xf9, 0xed, 0xd1, 0x6f, 0x6b, 0x70,
+ 0x59, 0x42, 0x6d, 0xd7, 0xee, 0xf6, 0xbb, 0x98, 0x98, 0x8e, 0x61, 0x77, 0x85, 0x62, 0xf5, 0xe6,
+ 0x99, 0x7d, 0x68, 0x12, 0x3d, 0xdf, 0xac, 0xb2, 0x61, 0x38, 0xa7, 0x4b, 0xe8, 0xdb, 0x1a, 0x5c,
+ 0x8d, 0x40, 0x2d, 0x6a, 0xda, 0xf4, 0x7d, 0x12, 0xc7, 0xb0, 0x88, 0x21, 0x29, 0x66, 0xfe, 0x7d,
+ 0xe4, 0xe8, 0xb0, 0x7a, 0x75, 0xed, 0x04, 0xdc, 0xf8, 0x44, 0xea, 0x2a, 0xbb, 0xb4, 0xbd, 0xed,
+ 0x50, 0x68, 0x62, 0xe7, 0xc5, 0x2e, 0x94, 0x04, 0x4e, 0x10, 0x44, 0xbf, 0xa3, 0xc1, 0xe3, 0x6a,
+ 0x81, 0xca, 0x2d, 0x5c, 0x05, 0xfb, 0xc2, 0x99, 0x75, 0x26, 0x85, 0x9f, 0xbb, 0x6f, 0x72, 0x80,
+ 0x38, 0xaf, 0x57, 0xe8, 0x19, 0x98, 0xea, 0x32, 0xc6, 0xe4, 0x6a, 0xda, 0x04, 0x77, 0xc3, 0x71,
+ 0x5e, 0x0d, 0x70, 0x04, 0xa3, 0xd6, 0x4f, 0xcf, 0xb3, 0x5a, 0xb6, 0x15, 0xdc, 0xb6, 0xbb, 0x76,
+ 0xb8, 0x54, 0x61, 0xd7, 0x06, 0xd8, 0x70, 0xb4, 0x3c, 0xab, 0xd5, 0x6c, 0xf0, 0x72, 0x9c, 0xa8,
+ 0xc5, 0x42, 0x24, 0xed, 0xae, 0xd1, 0x21, 0xad, 0xbe, 0xe3, 0xb4, 0x7c, 0x8f, 0x59, 0xc4, 0x0d,
+ 0x62, 0x58, 0x8e, 0xed, 0x92, 0xa5, 0x99, 0xe2, 0x21, 0x92, 0xcd, 0x3c, 0xa4, 0x38, 0x9f, 0x1e,
+ 0x5a, 0x01, 0xd8, 0x36, 0x6c, 0xa7, 0x7d, 0xdf, 0xe8, 0xdd, 0x71, 0x97, 0x66, 0x99, 0x00, 0x63,
+ 0xa6, 0xc7, 0x75, 0x59, 0x8a, 0x95, 0x1a, 0x94, 0x9b, 0xa8, 0x14, 0xc4, 0x84, 0x5f, 0x7f, 0x58,
+ 0x9a, 0x3b, 0x23, 0x6e, 0x8a, 0x10, 0xf2, 0xe1, 0xbb, 0xa5, 0x90, 0xc0, 0x09, 0x82, 0xe8, 0xeb,
+ 0x1a, 0xcc, 0x05, 0x07, 0x41, 0x48, 0xba, 0xb2, 0x0f, 0xf3, 0x67, 0xdd, 0x07, 0xe6, 0x2b, 0x68,
+ 0x27, 0x88, 0xe0, 0x14, 0x51, 0xfd, 0xb0, 0xc4, 0x95, 0xe2, 0x01, 0x16, 0x44, 0xaf, 0xc2, 0x7c,
+ 0x97, 0x74, 0x3d, 0xff, 0x60, 0x35, 0xba, 0x22, 0x2c, 0xbc, 0x1b, 0xcc, 0x48, 0x58, 0x4f, 0x82,
+ 0x70, 0xba, 0x2e, 0xdd, 0x20, 0xd8, 0x74, 0x5d, 0x6f, 0xc7, 0xed, 0x4b, 0xf1, 0x06, 0xd1, 0x4c,
+ 0xc1, 0xf0, 0x40, 0x6d, 0x54, 0x87, 0x45, 0x51, 0xd6, 0xa4, 0x2a, 0x57, 0x70, 0xdd, 0x27, 0xd1,
+ 0x09, 0x06, 0x55, 0x25, 0x16, 0x9b, 0x69, 0x20, 0x1e, 0xac, 0x4f, 0xbf, 0x82, 0xfe, 0x50, 0x7b,
+ 0x31, 0x1e, 0x7f, 0xc5, 0x46, 0x12, 0x84, 0xd3, 0x75, 0x23, 0xe5, 0x30, 0xd1, 0x85, 0x89, 0xf8,
+ 0x2b, 0x36, 0x52, 0x30, 0x3c, 0x50, 0x5b, 0xff, 0xef, 0xe3, 0xf0, 0xe1, 0x21, 0xc4, 0x36, 0xea,
+ 0x66, 0x0f, 0xf7, 0x09, 0x8b, 0x68, 0x25, 0x3a, 0xe0, 0x5e, 0x79, 0xa3, 0x6f, 0xb8, 0xa1, 0x1d,
+ 0x1e, 0x0c, 0x39, 0x3d, 0xbd, 0x9c, 0xe9, 0x39, 0x3d, 0xbd, 0x61, 0xa7, 0x33, 0xc8, 0x9b, 0xce,
+ 0xd3, 0x93, 0x1c, 0x7e, 0xfa, 0xbb, 0xd9, 0xd3, 0x5f, 0x70, 0x54, 0x4f, 0x64, 0x97, 0x5e, 0x0e,
+ 0xbb, 0x14, 0x1c, 0xd5, 0x21, 0xd8, 0xeb, 0x4f, 0xc6, 0xe1, 0x23, 0xc3, 0x6c, 0x21, 0x05, 0xf9,
+ 0x2b, 0x43, 0x48, 0x9f, 0x2b, 0x7f, 0xe5, 0x85, 0x16, 0x9c, 0x23, 0x7f, 0x65, 0x90, 0x3c, 0x6f,
+ 0xfe, 0xca, 0x1b, 0xd5, 0xf3, 0xe2, 0xaf, 0xbc, 0x51, 0x1d, 0x82, 0xbf, 0xfe, 0x34, 0xbd, 0x3f,
+ 0xc8, 0x1d, 0xac, 0x09, 0x63, 0x66, 0xaf, 0x5f, 0x50, 0x48, 0xb1, 0xb3, 0xb3, 0x7a, 0x6b, 0x13,
+ 0x53, 0x1c, 0x08, 0xc3, 0x24, 0xe7, 0x9f, 0x82, 0x22, 0x88, 0x85, 0x85, 0x70, 0x96, 0xc4, 0x02,
+ 0x13, 0x1d, 0x2a, 0xd2, 0xdb, 0x21, 0x5d, 0xe2, 0x1b, 0x4e, 0x3b, 0xf4, 0x7c, 0xa3, 0x53, 0x54,
+ 0xda, 0xb0, 0xa1, 0x5a, 0x4b, 0xe1, 0xc2, 0x03, 0xd8, 0xe9, 0x80, 0xf4, 0x6c, 0xab, 0xa0, 0x7c,
+ 0x61, 0x03, 0xd2, 0x6a, 0x36, 0x30, 0xc5, 0xa1, 0xff, 0x61, 0x19, 0x94, 0xf8, 0x79, 0x6a, 0xa9,
+ 0x18, 0x8e, 0xe3, 0xdd, 0x6f, 0xf9, 0xf6, 0x9e, 0xed, 0x90, 0x0e, 0xb1, 0x64, 0x80, 0x75, 0x20,
+ 0x4e, 0x58, 0x99, 0xea, 0xb4, 0x9a, 0x57, 0x09, 0xe7, 0xb7, 0xa7, 0x96, 0xe8, 0xa2, 0x99, 0xbe,
+ 0x86, 0x33, 0xca, 0x99, 0xca, 0xc0, 0x9d, 0x1e, 0xbe, 0x9e, 0x06, 0x8a, 0xf1, 0x20, 0x59, 0xf4,
+ 0x55, 0x8d, 0x9b, 0xd8, 0xd2, 0x47, 0x27, 0xe6, 0xec, 0xc6, 0x19, 0xb9, 0x84, 0x63, 0x5b, 0x3d,
+ 0xf6, 0x02, 0x26, 0x09, 0x52, 0x5b, 0xe8, 0xd2, 0x6e, 0x96, 0xb7, 0x4d, 0xcc, 0xec, 0x9d, 0xa2,
+ 0x5d, 0xc9, 0x71, 0xdf, 0xf1, 0x53, 0xc2, 0xcc, 0x0a, 0x38, 0xbb, 0x23, 0x72, 0x94, 0xa4, 0xb3,
+ 0x44, 0x08, 0x81, 0xc2, 0xa3, 0x94, 0xf2, 0xba, 0xc4, 0xa3, 0x24, 0x01, 0x38, 0x49, 0x10, 0xf5,
+ 0x60, 0x7a, 0x37, 0x72, 0x3d, 0x09, 0x8b, 0xb6, 0x5e, 0x94, 0xba, 0xe2, 0xbf, 0xe2, 0xa7, 0x91,
+ 0xb2, 0x10, 0xc7, 0x44, 0xd0, 0x0e, 0x4c, 0xed, 0x72, 0x41, 0x24, 0x2c, 0xd1, 0xd5, 0x91, 0x35,
+ 0x65, 0x6e, 0x10, 0x89, 0x22, 0x1c, 0xa1, 0x57, 0x23, 0x1f, 0xca, 0x27, 0x04, 0xbc, 0xfc, 0xba,
+ 0x06, 0x97, 0xf6, 0x88, 0x1f, 0xda, 0x66, 0xda, 0xd7, 0x39, 0x5d, 0x5c, 0x9b, 0xbf, 0x97, 0x85,
+ 0x90, 0xb3, 0x49, 0x26, 0x08, 0x67, 0x77, 0x41, 0xff, 0xa9, 0x06, 0x03, 0xde, 0x1f, 0xf4, 0x2b,
+ 0x1a, 0xcc, 0x6c, 0x13, 0x23, 0xec, 0xfb, 0xe4, 0x86, 0x11, 0xca, 0x20, 0xcc, 0x7b, 0x67, 0xe1,
+ 0x74, 0x5a, 0xb9, 0xae, 0x20, 0xe6, 0xe7, 0x2d, 0xf2, 0xae, 0xa7, 0x0a, 0xc2, 0x89, 0x1e, 0x2c,
+ 0xbf, 0x06, 0x8b, 0x03, 0x0d, 0x4f, 0xe5, 0xd9, 0xff, 0x77, 0xc2, 0x79, 0x98, 0x4e, 0xf0, 0xf5,
+ 0x36, 0x4c, 0x18, 0x96, 0x25, 0xd3, 0x87, 0xbc, 0x54, 0xec, 0x5c, 0xd1, 0x52, 0x63, 0x5d, 0xd9,
+ 0x4f, 0xcc, 0xd1, 0xa2, 0xeb, 0x80, 0x8c, 0xc4, 0x11, 0xda, 0xba, 0x67, 0x45, 0x56, 0x12, 0x73,
+ 0xe4, 0xae, 0x0e, 0x40, 0x71, 0x46, 0x0b, 0xfd, 0x15, 0x98, 0x4b, 0xde, 0xa2, 0x3a, 0x45, 0xcc,
+ 0x95, 0xfe, 0xd7, 0x35, 0x40, 0x83, 0x57, 0x8b, 0x91, 0x0f, 0x65, 0x51, 0x23, 0x9a, 0xe2, 0x42,
+ 0x7e, 0xc5, 0x74, 0x34, 0x58, 0x1c, 0x6c, 0x2c, 0x0a, 0x02, 0x2c, 0xe9, 0xe8, 0x7f, 0xa6, 0x41,
+ 0x9c, 0x9f, 0x00, 0x7d, 0x1a, 0x2a, 0x16, 0x09, 0x4c, 0xdf, 0xee, 0x85, 0xf1, 0x77, 0xc8, 0x6b,
+ 0xa1, 0x8d, 0x18, 0x84, 0xd5, 0x7a, 0x48, 0x87, 0xc9, 0xd0, 0x08, 0x76, 0x9b, 0x0d, 0x61, 0x2e,
+ 0xb2, 0xcd, 0xfd, 0x2e, 0x2b, 0xc1, 0x02, 0x12, 0xdf, 0x48, 0x1a, 0x1b, 0xe2, 0x46, 0x12, 0xda,
+ 0x3e, 0x83, 0xeb, 0x57, 0xe8, 0xe4, 0xab, 0x57, 0xfa, 0x7f, 0x2a, 0x41, 0x32, 0x25, 0x44, 0xd1,
+ 0x21, 0x18, 0xbc, 0x2f, 0x56, 0x3a, 0xb7, 0xfb, 0x62, 0x1f, 0x67, 0xf9, 0x94, 0x78, 0x02, 0x3c,
+ 0x7e, 0xec, 0xa1, 0x66, 0x41, 0xe2, 0xe9, 0xeb, 0x64, 0x0d, 0xf4, 0x12, 0x8b, 0x90, 0x0a, 0x23,
+ 0x23, 0xfa, 0xc3, 0xd1, 0xb2, 0x68, 0xd3, 0xc2, 0x87, 0xe2, 0x46, 0x9c, 0xfc, 0x7e, 0x56, 0x8a,
+ 0x79, 0x0b, 0xf4, 0x69, 0x11, 0x50, 0x37, 0x91, 0xb8, 0xb5, 0x17, 0x5d, 0xf5, 0x5b, 0x4c, 0x34,
+ 0x8c, 0xa3, 0xec, 0xf4, 0xbf, 0xab, 0xc1, 0x94, 0xb8, 0x27, 0x3e, 0x44, 0x4c, 0xde, 0x36, 0x4c,
+ 0x30, 0x25, 0x7d, 0x14, 0xfd, 0xa5, 0xbd, 0xe3, 0x79, 0x61, 0xe2, 0xb6, 0x3c, 0x0b, 0x04, 0x63,
+ 0xff, 0x62, 0x8e, 0x5e, 0xff, 0xd6, 0x38, 0x5c, 0x15, 0x55, 0x06, 0xb6, 0x67, 0xb9, 0x04, 0x0f,
+ 0xe0, 0x82, 0x98, 0xa5, 0x86, 0x6f, 0xd8, 0xf2, 0x60, 0xa8, 0x98, 0xd9, 0x25, 0x8e, 0x0c, 0x07,
+ 0xd0, 0xe1, 0x2c, 0x1a, 0xe8, 0xe7, 0xe1, 0xa2, 0x28, 0xbe, 0x49, 0x0c, 0x27, 0xdc, 0x89, 0x68,
+ 0x17, 0x33, 0xc1, 0xd8, 0x29, 0xf5, 0x7a, 0x06, 0x3e, 0x9c, 0x49, 0x85, 0xc5, 0x72, 0x09, 0x40,
+ 0xdd, 0x27, 0x86, 0x7a, 0x2a, 0x36, 0x42, 0x2c, 0xd7, 0x7a, 0x26, 0x46, 0x9c, 0x43, 0x89, 0xf9,
+ 0xaf, 0x8c, 0x7d, 0x66, 0x0e, 0x63, 0x12, 0xfa, 0x36, 0xcb, 0x5f, 0x40, 0xf9, 0x9b, 0x1b, 0xb0,
+ 0x49, 0x10, 0x4e, 0xd7, 0x45, 0x2f, 0xc3, 0x1c, 0x3b, 0xe8, 0x8b, 0xaf, 0xe1, 0x4c, 0xc4, 0xf9,
+ 0x10, 0x37, 0x12, 0x10, 0x9c, 0xaa, 0xa9, 0xff, 0x96, 0x06, 0x33, 0x2a, 0x03, 0x0d, 0x11, 0xc3,
+ 0xdb, 0x57, 0xc4, 0xf5, 0x08, 0xa1, 0x90, 0x2a, 0xd5, 0x61, 0x24, 0xf6, 0x43, 0x0d, 0x2e, 0x64,
+ 0xb4, 0x61, 0xc7, 0x53, 0x24, 0x25, 0xfa, 0x47, 0x39, 0x9e, 0x1a, 0xd8, 0x46, 0xe4, 0xf1, 0x54,
+ 0x1a, 0x82, 0x07, 0xe8, 0xa2, 0x7b, 0x30, 0x66, 0xfa, 0xb6, 0x18, 0x96, 0x17, 0x0b, 0x99, 0x24,
+ 0xb8, 0x19, 0x07, 0x46, 0xd7, 0x71, 0x13, 0x53, 0x84, 0xfa, 0xbf, 0x1a, 0x83, 0x8a, 0x92, 0x82,
+ 0x02, 0xad, 0x8f, 0x62, 0xd1, 0xc6, 0xe8, 0x23, 0xab, 0x76, 0x1d, 0xc6, 0x3a, 0xbd, 0x7e, 0x41,
+ 0x93, 0x56, 0xa2, 0xbb, 0x41, 0xd1, 0x75, 0x7a, 0x7d, 0x74, 0x4f, 0x1a, 0xc9, 0xc5, 0xcc, 0x58,
+ 0x19, 0x4b, 0x97, 0x32, 0x94, 0x23, 0xde, 0x1c, 0xcf, 0xe5, 0xcd, 0x2e, 0x4c, 0x05, 0xc2, 0x82,
+ 0x9e, 0x28, 0x9e, 0xfe, 0x44, 0x19, 0x69, 0x61, 0x31, 0x73, 0xf5, 0x3b, 0x32, 0xa8, 0x23, 0x1a,
+ 0x54, 0x01, 0xe8, 0xb3, 0x68, 0x66, 0x66, 0x57, 0x94, 0xb9, 0x02, 0xb0, 0xc9, 0x4a, 0xb0, 0x80,
+ 0xe8, 0xff, 0x52, 0x03, 0x34, 0x88, 0x10, 0x7d, 0x18, 0x26, 0x58, 0x50, 0xb7, 0x58, 0x68, 0xca,
+ 0xcd, 0x77, 0x23, 0x08, 0x30, 0x87, 0xa1, 0x37, 0x45, 0xc4, 0x7e, 0xb1, 0x89, 0x91, 0xbb, 0xb7,
+ 0xa0, 0xa9, 0x84, 0xf8, 0x47, 0xdb, 0xd3, 0x58, 0xde, 0xf6, 0xa4, 0xff, 0x49, 0x89, 0x72, 0x9c,
+ 0xed, 0x86, 0xc4, 0x65, 0x11, 0x96, 0x07, 0x00, 0x46, 0x3f, 0xf4, 0xf8, 0x76, 0x2c, 0x18, 0xaf,
+ 0x59, 0x6c, 0x70, 0x25, 0xd2, 0x55, 0x89, 0x90, 0x9f, 0x80, 0xc4, 0xbf, 0xb1, 0x42, 0x8c, 0x92,
+ 0x0e, 0xed, 0x2e, 0x79, 0xd3, 0x76, 0x2d, 0xef, 0xbe, 0x18, 0x8b, 0x51, 0x49, 0xdf, 0x95, 0x08,
+ 0x39, 0xe9, 0xf8, 0x37, 0x56, 0x88, 0xa1, 0x2f, 0xc0, 0x12, 0x4b, 0xa8, 0xea, 0xb2, 0x54, 0x3c,
+ 0xa2, 0x6f, 0x9e, 0xe3, 0x44, 0xfb, 0x43, 0xb9, 0xf6, 0xd4, 0xd1, 0x61, 0x75, 0xa9, 0x9e, 0x53,
+ 0x07, 0xe7, 0xb6, 0xd6, 0x7f, 0x47, 0x83, 0x4b, 0x99, 0x43, 0x81, 0x6e, 0xc0, 0x62, 0x7c, 0xec,
+ 0xad, 0x0a, 0xb4, 0x72, 0x9c, 0x59, 0xea, 0x56, 0xba, 0x02, 0x1e, 0x6c, 0x83, 0xd6, 0xe5, 0xa6,
+ 0xae, 0x0a, 0x4c, 0x71, 0x66, 0xfe, 0xa4, 0x40, 0x95, 0x25, 0x53, 0x71, 0x56, 0x3b, 0xfd, 0xe7,
+ 0x12, 0x1d, 0x8e, 0x07, 0x8c, 0xb2, 0xf2, 0x16, 0xe9, 0xc8, 0x48, 0x58, 0xc9, 0xca, 0x35, 0x5a,
+ 0x88, 0x39, 0x0c, 0x3d, 0xad, 0xc6, 0xa3, 0x4b, 0x91, 0x11, 0xc5, 0xa4, 0xeb, 0x21, 0xc0, 0xba,
+ 0xe7, 0xda, 0xa1, 0xe7, 0xdb, 0x6e, 0x07, 0x6d, 0x43, 0xd9, 0x10, 0xd9, 0x7e, 0x05, 0xab, 0x7d,
+ 0xb6, 0x90, 0x41, 0x24, 0x70, 0xf0, 0x48, 0xad, 0xe8, 0x17, 0x96, 0xb8, 0xf5, 0x7f, 0xaa, 0xc1,
+ 0x65, 0x2a, 0x3d, 0xac, 0xe8, 0xaa, 0x9b, 0xbc, 0x12, 0x3b, 0xc4, 0x3e, 0xd8, 0x85, 0x8a, 0x1f,
+ 0x37, 0x13, 0x7c, 0xf9, 0xb3, 0xea, 0x75, 0x5b, 0x25, 0xf5, 0x37, 0xd5, 0x11, 0xea, 0xbe, 0x17,
+ 0x44, 0x93, 0x93, 0xbe, 0x81, 0x2b, 0xd7, 0xaa, 0xd2, 0x13, 0xac, 0xe2, 0xd7, 0xbf, 0x56, 0x02,
+ 0xd8, 0x20, 0xe1, 0x7d, 0xcf, 0xdf, 0xa5, 0x43, 0xf4, 0xbe, 0xba, 0xf4, 0xf1, 0x14, 0x8c, 0xf7,
+ 0x3c, 0x2b, 0x10, 0xe2, 0x84, 0xdd, 0x3b, 0x62, 0x07, 0xb7, 0xac, 0x14, 0x55, 0x61, 0x82, 0x79,
+ 0x69, 0x85, 0xdc, 0x66, 0x2a, 0x2a, 0x55, 0x4b, 0x02, 0xcc, 0xcb, 0x79, 0xee, 0x3c, 0x16, 0x42,
+ 0x18, 0x08, 0x9d, 0x5b, 0xe4, 0xce, 0xe3, 0x65, 0x58, 0x42, 0xf5, 0xaf, 0x8d, 0x43, 0x22, 0x5d,
+ 0x75, 0x6c, 0x37, 0x6b, 0xe7, 0x63, 0x37, 0x7f, 0x01, 0x96, 0x1c, 0xcf, 0xb0, 0x6a, 0x86, 0x43,
+ 0x99, 0xde, 0x6f, 0xf3, 0xe9, 0x30, 0xdc, 0x8e, 0xcc, 0x85, 0xcc, 0x04, 0xc0, 0xed, 0x9c, 0x3a,
+ 0x38, 0xb7, 0x35, 0x0a, 0x65, 0x92, 0x6c, 0x9e, 0x29, 0xeb, 0xf6, 0xa8, 0xc9, 0xbc, 0x57, 0xd4,
+ 0xe0, 0x51, 0xb9, 0x85, 0x26, 0xf3, 0x68, 0xa3, 0x5f, 0xd0, 0xe0, 0x12, 0xd9, 0x0f, 0x89, 0xef,
+ 0x1a, 0xce, 0x5d, 0xdf, 0xd8, 0xde, 0xb6, 0x4d, 0x11, 0x12, 0xc3, 0x27, 0xa7, 0x75, 0x74, 0x58,
+ 0xbd, 0xb4, 0x96, 0x55, 0xe1, 0xe1, 0x61, 0xf5, 0x93, 0x83, 0xb9, 0xea, 0xa3, 0x58, 0xd1, 0xcc,
+ 0x26, 0x8c, 0x19, 0xb3, 0xc9, 0x2d, 0xbf, 0x04, 0x95, 0x53, 0x44, 0x47, 0x4e, 0xab, 0x3e, 0x94,
+ 0x7f, 0x30, 0x09, 0x4a, 0x9c, 0xee, 0x29, 0xf2, 0xa3, 0xfd, 0x23, 0x0d, 0x2e, 0x9a, 0x8e, 0x4d,
+ 0xdc, 0x30, 0x15, 0x8e, 0xcc, 0x17, 0xc6, 0x66, 0xa1, 0x00, 0xe2, 0x1e, 0x71, 0x9b, 0x8d, 0xba,
+ 0xe7, 0xba, 0xc4, 0x0c, 0xeb, 0x19, 0xc8, 0xb9, 0x49, 0x92, 0x05, 0xc1, 0x99, 0x9d, 0x61, 0xdf,
+ 0xc3, 0xca, 0x9b, 0x0d, 0xf5, 0x7a, 0x4e, 0x5d, 0x94, 0x61, 0x09, 0x45, 0xcf, 0x43, 0xa5, 0xe3,
+ 0x7b, 0xfd, 0x5e, 0x50, 0x67, 0xd1, 0x3a, 0x7c, 0x06, 0x59, 0x30, 0xd4, 0x8d, 0xb8, 0x18, 0xab,
+ 0x75, 0xd0, 0xa7, 0x60, 0x86, 0xff, 0x6c, 0xf9, 0x64, 0xdb, 0xde, 0x17, 0xcb, 0x8d, 0x85, 0x00,
+ 0xdc, 0x50, 0xca, 0x71, 0xa2, 0x16, 0xfa, 0x18, 0x4c, 0xdb, 0x41, 0xd0, 0x27, 0xfe, 0x26, 0xbe,
+ 0x2d, 0x72, 0xb0, 0x30, 0xef, 0x67, 0x33, 0x2a, 0xc4, 0x31, 0x1c, 0xfd, 0xaa, 0x06, 0x73, 0x3e,
+ 0x79, 0xb7, 0x6f, 0xfb, 0xc4, 0x62, 0x44, 0x03, 0x11, 0x2c, 0x8d, 0x47, 0x0b, 0xd0, 0x5e, 0xc1,
+ 0x09, 0xa4, 0x9c, 0xcf, 0xa5, 0xef, 0x20, 0x09, 0xc4, 0xa9, 0x1e, 0xd0, 0xa1, 0x0a, 0xec, 0x8e,
+ 0x6b, 0xbb, 0x9d, 0x55, 0xa7, 0x13, 0x2c, 0x95, 0xd9, 0xd2, 0x65, 0x43, 0xd5, 0x8e, 0x8b, 0xb1,
+ 0x5a, 0x07, 0xbd, 0x08, 0xb3, 0xfd, 0x80, 0x72, 0x6e, 0x97, 0xf0, 0xf1, 0x9d, 0x8e, 0x43, 0xe8,
+ 0x36, 0x55, 0x00, 0x4e, 0xd6, 0xa3, 0xf6, 0x58, 0x54, 0x20, 0x46, 0x19, 0xf8, 0x8d, 0x4b, 0xda,
+ 0xcf, 0xcd, 0x04, 0x04, 0xa7, 0x6a, 0x2e, 0xaf, 0xc2, 0x85, 0x8c, 0xcf, 0x3c, 0xd5, 0xf2, 0xf8,
+ 0xad, 0x12, 0x7c, 0xe8, 0x44, 0xae, 0x44, 0xff, 0x50, 0x83, 0x0a, 0xd9, 0x0f, 0x7d, 0x43, 0x06,
+ 0xf4, 0xd1, 0x29, 0xda, 0x3e, 0x97, 0x25, 0xb0, 0xb2, 0x16, 0x13, 0xe2, 0xd3, 0x26, 0xb7, 0x3b,
+ 0x05, 0x82, 0xd5, 0xfe, 0x50, 0xd5, 0x9a, 0xdf, 0xc7, 0x55, 0x7d, 0x6b, 0x22, 0xab, 0xaf, 0x80,
+ 0x2c, 0x7f, 0x0e, 0x16, 0xd2, 0x98, 0x4f, 0x35, 0x52, 0xff, 0xa2, 0x04, 0x13, 0x2d, 0xc7, 0x70,
+ 0x1f, 0x45, 0xd6, 0xf8, 0xbf, 0x9c, 0x48, 0xe6, 0x50, 0x28, 0x43, 0x06, 0xeb, 0x6a, 0x6e, 0xa2,
+ 0x97, 0x4e, 0x2a, 0xd1, 0xcb, 0x6b, 0xc5, 0x49, 0x1c, 0x9f, 0xd7, 0xe5, 0x8f, 0x34, 0x98, 0x66,
+ 0xf5, 0x1e, 0x41, 0x4e, 0x88, 0xb7, 0x93, 0x39, 0x21, 0x5e, 0x2a, 0xfc, 0x4d, 0x39, 0x29, 0x20,
+ 0x7e, 0x18, 0x7d, 0x0b, 0xcb, 0xf8, 0xf0, 0x45, 0x35, 0x9b, 0x39, 0xff, 0x98, 0x67, 0xb3, 0xd2,
+ 0xa7, 0xdc, 0xf6, 0x4c, 0xc3, 0x49, 0x6b, 0x70, 0xc7, 0xa7, 0x34, 0xef, 0xc2, 0x34, 0x11, 0xd7,
+ 0xa4, 0xa3, 0x8f, 0x29, 0xa4, 0xd2, 0x46, 0x77, 0xad, 0x63, 0x72, 0x51, 0x49, 0x80, 0x63, 0x0a,
+ 0xfa, 0xbf, 0x2d, 0x41, 0x45, 0x99, 0xcb, 0xf7, 0x22, 0x4b, 0xcb, 0xf5, 0xcc, 0x44, 0xbf, 0x25,
+ 0x16, 0xb1, 0x77, 0xf9, 0x14, 0xc9, 0xc2, 0x7d, 0xa8, 0x98, 0x71, 0xc2, 0xb9, 0x51, 0x98, 0x5b,
+ 0xc9, 0x5b, 0x27, 0x42, 0x86, 0xe3, 0x02, 0xac, 0x12, 0xd1, 0xff, 0x75, 0x09, 0xa6, 0x5a, 0xbe,
+ 0x47, 0x27, 0xf8, 0x11, 0x88, 0x06, 0x23, 0x21, 0x1a, 0x8a, 0xad, 0x5b, 0xde, 0xd9, 0x5c, 0xe1,
+ 0x60, 0xa7, 0x84, 0xc3, 0xea, 0x28, 0x44, 0x8e, 0x17, 0x0f, 0xff, 0x51, 0x83, 0x8a, 0xa8, 0xf9,
+ 0x08, 0x04, 0xc4, 0x97, 0x92, 0x02, 0xe2, 0x95, 0x11, 0xbe, 0x2b, 0x47, 0x44, 0xfc, 0xba, 0x06,
+ 0xb3, 0xa2, 0xc6, 0x3a, 0xe9, 0x6e, 0x11, 0x1f, 0x5d, 0x87, 0xa9, 0xa0, 0xcf, 0x26, 0x52, 0x7c,
+ 0xd0, 0x93, 0xaa, 0x90, 0xf0, 0xb7, 0x0c, 0x93, 0xe5, 0xac, 0xe7, 0x55, 0x94, 0xdc, 0x4a, 0xbc,
+ 0x00, 0x47, 0x8d, 0xa9, 0x09, 0xe7, 0x7b, 0xce, 0xc0, 0xc5, 0x76, 0xec, 0x39, 0x04, 0x33, 0x08,
+ 0xb5, 0x9c, 0xe8, 0xdf, 0xe8, 0xf0, 0x88, 0x59, 0x4e, 0x14, 0x1c, 0x60, 0x5e, 0xae, 0x7f, 0x7d,
+ 0x5c, 0x0e, 0x36, 0x93, 0x60, 0x37, 0x61, 0xda, 0xf4, 0x89, 0x11, 0x12, 0xab, 0x76, 0x30, 0x4c,
+ 0xe7, 0x98, 0x16, 0x57, 0x8f, 0x5a, 0xe0, 0xb8, 0x31, 0x55, 0x98, 0xd4, 0xf3, 0xa0, 0x52, 0xac,
+ 0x5b, 0xe6, 0x9e, 0x05, 0x7d, 0x16, 0x26, 0xbc, 0xfb, 0xae, 0x0c, 0x84, 0x38, 0x96, 0x30, 0xfb,
+ 0x94, 0x3b, 0xb4, 0x36, 0xe6, 0x8d, 0x58, 0x26, 0x0f, 0x91, 0xfb, 0x81, 0x2b, 0xb2, 0x95, 0xac,
+ 0xbc, 0x0f, 0xc8, 0x81, 0xa9, 0x2e, 0x9b, 0x06, 0xee, 0xe5, 0x1e, 0x8d, 0x95, 0xf9, 0x84, 0xaa,
+ 0xf9, 0x04, 0x19, 0x66, 0x1c, 0x91, 0xa0, 0x8a, 0x2f, 0x55, 0xce, 0x82, 0x9e, 0x61, 0x12, 0x55,
+ 0xf1, 0xdd, 0x88, 0x0a, 0x71, 0x0c, 0x47, 0x07, 0x50, 0xe1, 0x37, 0x92, 0xb9, 0x94, 0x9d, 0x2a,
+ 0xee, 0x80, 0x14, 0xdd, 0xbb, 0x1b, 0x63, 0xe3, 0x43, 0xaf, 0x14, 0x60, 0x95, 0x96, 0xfe, 0xcb,
+ 0x63, 0x92, 0x49, 0x85, 0xc4, 0xcf, 0xce, 0xb3, 0xae, 0x15, 0x7a, 0xaf, 0xe1, 0x93, 0x30, 0xd1,
+ 0xdb, 0x31, 0x82, 0x88, 0x53, 0xa3, 0xdc, 0x97, 0x13, 0x2d, 0x5a, 0xf8, 0xf0, 0xb0, 0x3a, 0x23,
+ 0x48, 0xb3, 0xdf, 0x98, 0xd7, 0x45, 0x7d, 0xb8, 0x10, 0x84, 0x86, 0x43, 0xda, 0xb6, 0xf0, 0x16,
+ 0x05, 0xa1, 0xd1, 0xed, 0x15, 0x48, 0x61, 0xc9, 0x8e, 0x93, 0xda, 0x83, 0xa8, 0x70, 0x16, 0x7e,
+ 0xf4, 0xd7, 0x34, 0x58, 0x62, 0xe5, 0xab, 0xfd, 0xd0, 0xe3, 0x59, 0x69, 0x63, 0xe2, 0xa7, 0x3f,
+ 0x4c, 0x65, 0xd6, 0x7d, 0x3b, 0x07, 0x1f, 0xce, 0xa5, 0xa4, 0xff, 0xa9, 0x06, 0x68, 0x70, 0x16,
+ 0x91, 0x03, 0x65, 0x8b, 0x6c, 0x1b, 0x7d, 0x27, 0x8c, 0x76, 0xe1, 0x42, 0xb7, 0x56, 0x63, 0x94,
+ 0xb1, 0x70, 0x6c, 0x08, 0xbc, 0x58, 0x52, 0x40, 0x1e, 0x4c, 0xdf, 0xdf, 0xb1, 0x43, 0xe2, 0xd8,
+ 0x41, 0x28, 0x04, 0xe4, 0xa8, 0xe4, 0xa4, 0xda, 0xf1, 0x66, 0x84, 0x18, 0xc7, 0x34, 0xf4, 0x5f,
+ 0x1c, 0x83, 0xf2, 0x29, 0x5e, 0xd7, 0xe9, 0x03, 0x12, 0xd7, 0xd5, 0xa9, 0xae, 0x42, 0x46, 0xf1,
+ 0x52, 0x31, 0x8d, 0xa2, 0x3e, 0x80, 0x0c, 0x67, 0x10, 0x40, 0x5f, 0x86, 0x8b, 0xb6, 0xbb, 0xed,
+ 0x1b, 0x41, 0xe8, 0xf7, 0xcd, 0xb0, 0xef, 0x47, 0x84, 0xc7, 0x8a, 0x10, 0x66, 0xd6, 0x7d, 0x33,
+ 0x03, 0x1d, 0xce, 0x24, 0x82, 0x08, 0x4c, 0xdd, 0xf7, 0xfc, 0x5d, 0x2a, 0xbf, 0xc6, 0x8b, 0xa7,
+ 0xf0, 0x7e, 0x93, 0xa1, 0x88, 0x05, 0x17, 0xff, 0x1d, 0xe0, 0x08, 0xb7, 0xfe, 0xc7, 0x1a, 0x4c,
+ 0xf0, 0xeb, 0x4f, 0x1f, 0x08, 0xd3, 0x86, 0x75, 0x35, 0x37, 0x2d, 0x1d, 0xb5, 0x38, 0x58, 0x8d,
+ 0x0f, 0x88, 0xc5, 0xc1, 0xfa, 0x9a, 0xa3, 0x4e, 0xfc, 0xf1, 0x98, 0xf8, 0x16, 0xb6, 0x5f, 0x37,
+ 0xe1, 0x82, 0xd0, 0x3b, 0x6f, 0xdb, 0xdb, 0x84, 0x72, 0x57, 0xc3, 0x38, 0x08, 0xc4, 0x5d, 0x5e,
+ 0x26, 0xf8, 0xea, 0x83, 0x60, 0x9c, 0xd5, 0x06, 0xfd, 0x1b, 0x8d, 0xee, 0x8c, 0xa1, 0x6f, 0x9b,
+ 0x23, 0x65, 0xd0, 0x93, 0x7d, 0x5b, 0x59, 0xe7, 0xc8, 0xb8, 0xc1, 0xbe, 0x19, 0x6f, 0x91, 0xac,
+ 0xf4, 0xe1, 0x61, 0xb5, 0x9a, 0xe1, 0x0d, 0x8c, 0xbc, 0xd7, 0x74, 0x60, 0xbf, 0xf6, 0xe3, 0x63,
+ 0xab, 0x30, 0x4f, 0x7a, 0xd4, 0x63, 0x74, 0x13, 0x26, 0x02, 0xd3, 0xeb, 0x91, 0xe3, 0x1e, 0x91,
+ 0x4a, 0x5b, 0x5c, 0x72, 0x80, 0xdb, 0xb4, 0x25, 0xe6, 0x08, 0x96, 0xdf, 0x81, 0x19, 0xb5, 0xe7,
+ 0x19, 0x0e, 0x81, 0x86, 0xea, 0x10, 0x38, 0xf5, 0xb1, 0x9a, 0xea, 0x40, 0xf8, 0xfd, 0x12, 0x88,
+ 0x77, 0x37, 0x86, 0x38, 0x2f, 0xb0, 0xa3, 0xac, 0x61, 0x23, 0xbc, 0x35, 0x92, 0x7e, 0xcc, 0x2f,
+ 0x1e, 0x03, 0x35, 0x71, 0x18, 0x72, 0x61, 0xd2, 0x31, 0xb6, 0x88, 0x13, 0xbd, 0xc2, 0x70, 0xbd,
+ 0xf8, 0x33, 0x01, 0x3c, 0xbb, 0x6e, 0x90, 0xf2, 0x2a, 0xf3, 0x42, 0x2c, 0xa8, 0x2c, 0xbf, 0x04,
+ 0x15, 0xa5, 0xda, 0xa9, 0x7c, 0x30, 0xdf, 0xd4, 0xe0, 0x72, 0xc4, 0x12, 0xc9, 0xeb, 0xfc, 0xe8,
+ 0x59, 0x28, 0x1b, 0x3d, 0x9b, 0xb9, 0x25, 0x55, 0xc7, 0xee, 0x6a, 0xab, 0xc9, 0xca, 0xb0, 0x84,
+ 0xa2, 0x8f, 0x43, 0x39, 0x9a, 0x27, 0xa1, 0xa3, 0xc8, 0x25, 0x2e, 0xcf, 0x53, 0x64, 0x0d, 0xf4,
+ 0x8c, 0x92, 0x07, 0x6d, 0x22, 0xde, 0xd1, 0x24, 0x61, 0x7e, 0x46, 0xaa, 0xff, 0x5a, 0x09, 0x66,
+ 0xb9, 0x2d, 0x5f, 0xb3, 0x5d, 0xcb, 0x76, 0x3b, 0x8f, 0x40, 0x94, 0x26, 0xde, 0x53, 0x2b, 0x9d,
+ 0xd5, 0x7b, 0x6a, 0xb7, 0x60, 0xf2, 0x5d, 0xba, 0xac, 0x23, 0x76, 0x18, 0x6a, 0x75, 0xc9, 0xb9,
+ 0x66, 0x12, 0x21, 0xc0, 0x02, 0x85, 0xfe, 0x5f, 0x35, 0x58, 0x4c, 0x0c, 0xcb, 0x23, 0x10, 0xca,
+ 0xdb, 0x49, 0xa1, 0xbc, 0x5a, 0x2c, 0xa9, 0x85, 0xd2, 0xe7, 0x1c, 0xe1, 0xfc, 0xbb, 0x25, 0x18,
+ 0x6f, 0x13, 0x62, 0x3d, 0x82, 0x99, 0x7e, 0x3b, 0xb1, 0x69, 0x7e, 0xb6, 0xf0, 0xdb, 0x1a, 0x79,
+ 0x16, 0xff, 0x76, 0xca, 0xe2, 0xff, 0x5c, 0x61, 0x0a, 0xc7, 0x9b, 0xfb, 0xbf, 0x51, 0x02, 0xa0,
+ 0xd5, 0xf8, 0x8b, 0x56, 0x22, 0x46, 0x2f, 0x7e, 0xf3, 0x70, 0xfa, 0xfd, 0xf2, 0x52, 0xa1, 0x2e,
+ 0xdf, 0x4d, 0x1a, 0x8b, 0xfd, 0xc9, 0xc9, 0x37, 0x93, 0x92, 0xab, 0x6f, 0xfc, 0x8c, 0x56, 0x9f,
+ 0xfe, 0xcf, 0x35, 0x60, 0x79, 0x95, 0x1b, 0x1b, 0x6d, 0xf4, 0x22, 0xcc, 0xda, 0xfc, 0xb4, 0xae,
+ 0xa1, 0x26, 0x8f, 0x62, 0x67, 0x0a, 0x4d, 0x15, 0x80, 0x93, 0xf5, 0x50, 0x57, 0x19, 0xd7, 0x11,
+ 0xde, 0x95, 0x13, 0xfd, 0x90, 0xd9, 0x43, 0x67, 0xb2, 0x27, 0x46, 0xff, 0x71, 0x09, 0xe6, 0x53,
+ 0x75, 0x87, 0xd0, 0xe7, 0xcf, 0x47, 0x7a, 0x29, 0x09, 0x45, 0xc7, 0xce, 0x3f, 0xa1, 0xa8, 0xcc,
+ 0xed, 0x39, 0x7e, 0xbe, 0xb9, 0x3d, 0xbf, 0xa7, 0x01, 0x7b, 0x1c, 0xee, 0x11, 0x48, 0xcf, 0xbf,
+ 0x94, 0x94, 0x9e, 0x9f, 0x29, 0xca, 0x38, 0x39, 0x42, 0xf3, 0xb7, 0x4b, 0xc0, 0xf2, 0xed, 0x8b,
+ 0xe0, 0x04, 0xe5, 0xbc, 0x5f, 0xcb, 0x39, 0xef, 0xbf, 0x2a, 0xc2, 0x05, 0x52, 0x8e, 0x2f, 0x25,
+ 0x64, 0xe0, 0xe3, 0x4a, 0x44, 0xc0, 0x58, 0x52, 0x8c, 0x0c, 0x46, 0x05, 0xa0, 0x07, 0x30, 0x1b,
+ 0xec, 0x78, 0x5e, 0x18, 0x99, 0xc0, 0x62, 0xee, 0x56, 0x0b, 0x87, 0xd4, 0x46, 0x9f, 0xc2, 0x17,
+ 0x66, 0x5b, 0xc5, 0x8d, 0x93, 0xa4, 0xd0, 0x0a, 0xc0, 0x96, 0xe3, 0x99, 0xbb, 0xf5, 0x66, 0x03,
+ 0x47, 0x81, 0x97, 0x2c, 0xa2, 0xa8, 0x26, 0x4b, 0xb1, 0x52, 0x43, 0xff, 0x43, 0x8d, 0x8f, 0xd6,
+ 0x29, 0x96, 0xd5, 0x23, 0x94, 0x92, 0x1f, 0x4d, 0x49, 0xc9, 0xbc, 0xd7, 0xe5, 0xbe, 0x2b, 0xbe,
+ 0x42, 0x3e, 0xdd, 0xe4, 0xc0, 0xac, 0xa3, 0x3e, 0xc3, 0x20, 0xd8, 0xb8, 0xd0, 0x0b, 0x0e, 0xe2,
+ 0xc1, 0x42, 0xa5, 0x08, 0x27, 0x91, 0x53, 0x31, 0x1a, 0x75, 0x5c, 0x7d, 0x96, 0x9a, 0x35, 0x6c,
+ 0xa9, 0x00, 0x9c, 0xac, 0xa7, 0xbf, 0x01, 0x1f, 0xe1, 0xdd, 0x66, 0x71, 0xcf, 0x6b, 0xfb, 0x26,
+ 0x09, 0x82, 0xba, 0xd1, 0x33, 0x4c, 0xaa, 0xd8, 0xb3, 0xeb, 0x82, 0xdc, 0xe3, 0x75, 0x8a, 0xa7,
+ 0xe3, 0xff, 0xbf, 0x06, 0x55, 0x05, 0x67, 0x22, 0x12, 0x24, 0x62, 0xd0, 0x6f, 0x69, 0x50, 0x31,
+ 0x5c, 0xd7, 0x0b, 0x0d, 0xf5, 0x00, 0xc6, 0x2a, 0xfe, 0xa0, 0x56, 0x2e, 0xa9, 0x95, 0xd5, 0x98,
+ 0x4c, 0xea, 0xa8, 0x55, 0x81, 0x60, 0xb5, 0x37, 0xcb, 0x9f, 0x83, 0x85, 0x74, 0xab, 0x53, 0xa9,
+ 0xf0, 0x35, 0xb8, 0xa4, 0xf4, 0x4a, 0x5c, 0xbc, 0xa2, 0xfa, 0xf2, 0x73, 0x30, 0xb5, 0x67, 0x07,
+ 0x76, 0x74, 0x81, 0x57, 0x19, 0xc5, 0x7b, 0xbc, 0x18, 0x47, 0x70, 0xfd, 0x75, 0xb8, 0xa0, 0xe2,
+ 0x60, 0x4b, 0x6c, 0xa3, 0x7d, 0x9a, 0x79, 0x58, 0x87, 0xab, 0x0a, 0x86, 0xcc, 0xdb, 0x47, 0xa7,
+ 0x41, 0xf7, 0xd5, 0xc9, 0x88, 0xc3, 0x45, 0x88, 0xfc, 0x77, 0x35, 0x78, 0x82, 0xe4, 0x31, 0x8c,
+ 0x60, 0xf7, 0x2f, 0x8c, 0x38, 0xa3, 0xb9, 0x0c, 0x29, 0xd2, 0xbb, 0xe4, 0x81, 0x71, 0x7e, 0xcf,
+ 0xd0, 0x01, 0x40, 0x20, 0xa7, 0x64, 0x94, 0xe8, 0xc9, 0xcc, 0x39, 0x16, 0x29, 0x3c, 0xe5, 0x6f,
+ 0xac, 0x10, 0x43, 0xef, 0x42, 0x39, 0x10, 0x33, 0x39, 0xca, 0xe5, 0xc8, 0x0c, 0xc6, 0x10, 0x01,
+ 0x62, 0xe2, 0x17, 0x96, 0x64, 0xd0, 0x6f, 0x6a, 0x70, 0xd1, 0xc9, 0x58, 0x17, 0x62, 0x4b, 0x68,
+ 0x9f, 0xc3, 0x92, 0xe3, 0x2e, 0xc0, 0x2c, 0x08, 0xce, 0xec, 0x0a, 0xfa, 0xc7, 0xb9, 0x37, 0xf1,
+ 0x78, 0xcc, 0xf2, 0xdd, 0x11, 0x3b, 0x79, 0x56, 0x97, 0xf2, 0xbe, 0x39, 0xc5, 0x55, 0x14, 0xe6,
+ 0xa9, 0xda, 0x82, 0xc9, 0x2d, 0xa6, 0xe2, 0x0b, 0x56, 0x2f, 0x6c, 0x4f, 0x88, 0xe7, 0xba, 0x99,
+ 0xfe, 0xcd, 0xff, 0xc7, 0x02, 0x33, 0x7a, 0x0b, 0xc6, 0x2c, 0x37, 0x7a, 0xcd, 0xe3, 0x95, 0x11,
+ 0xf4, 0xdb, 0x38, 0xc0, 0x94, 0x72, 0x07, 0x45, 0x8a, 0x5c, 0x28, 0xbb, 0x62, 0x57, 0x17, 0xbc,
+ 0x58, 0xf8, 0x3d, 0x43, 0xa9, 0x1d, 0x48, 0x9d, 0x24, 0x2a, 0xc1, 0x92, 0x06, 0xa5, 0x27, 0x15,
+ 0xf6, 0xf1, 0xd1, 0xe8, 0x0d, 0xf5, 0xe8, 0x7b, 0x4b, 0xd5, 0xbd, 0x4f, 0xf1, 0x94, 0xf6, 0x6c,
+ 0xae, 0xde, 0x4d, 0x60, 0x32, 0x34, 0x58, 0xbc, 0xc2, 0x64, 0xf1, 0x78, 0x01, 0xda, 0xff, 0xbb,
+ 0x14, 0x4b, 0xac, 0x4a, 0xb0, 0x9f, 0x01, 0x16, 0xc8, 0x29, 0x63, 0xf1, 0xa7, 0x16, 0xc5, 0x81,
+ 0x59, 0x61, 0xc6, 0xe2, 0x79, 0xec, 0x39, 0x63, 0xf1, 0xff, 0xb1, 0xc0, 0x8c, 0xde, 0xa1, 0xea,
+ 0x24, 0x97, 0xe3, 0x22, 0x1b, 0xd3, 0xeb, 0x23, 0xae, 0xb1, 0x20, 0x0a, 0x51, 0xe5, 0xbf, 0xb0,
+ 0xc4, 0x8f, 0xb6, 0x60, 0x4a, 0x98, 0x6e, 0xe2, 0x62, 0xed, 0x2b, 0x23, 0x64, 0xfb, 0x8d, 0x9e,
+ 0x66, 0xe0, 0x97, 0xdb, 0x22, 0xc4, 0xfa, 0x3f, 0x29, 0x73, 0xa3, 0x5b, 0x9c, 0xf5, 0x6d, 0x43,
+ 0x39, 0x42, 0x37, 0x4a, 0xb8, 0x74, 0x94, 0x8d, 0x9d, 0x7f, 0x5a, 0xf4, 0x0b, 0x4b, 0xdc, 0xa8,
+ 0x9e, 0x15, 0x99, 0x1e, 0x67, 0xcc, 0x1b, 0x2e, 0x2a, 0x3d, 0x19, 0x8a, 0x32, 0xf6, 0x28, 0x42,
+ 0x51, 0xce, 0xf2, 0xed, 0xfa, 0x57, 0x61, 0x3e, 0x8a, 0x14, 0xb1, 0x08, 0x73, 0x08, 0x8b, 0x18,
+ 0x4a, 0x76, 0x57, 0xab, 0x9e, 0x04, 0xe1, 0x74, 0x5d, 0xf4, 0xfb, 0x1a, 0x94, 0x4d, 0xb1, 0x4b,
+ 0x8b, 0x75, 0x75, 0x7b, 0x34, 0xcf, 0xcc, 0x4a, 0xb4, 0xe9, 0x73, 0x75, 0xef, 0x5e, 0x24, 0x23,
+ 0xa2, 0xe2, 0x33, 0xf2, 0xd4, 0xcb, 0x5e, 0xa3, 0xff, 0x40, 0x95, 0x59, 0x87, 0xe5, 0x88, 0x67,
+ 0x57, 0x5f, 0x78, 0x70, 0xe7, 0x9d, 0x11, 0xbf, 0x62, 0x35, 0xc6, 0xc8, 0x3f, 0xe4, 0x8b, 0x52,
+ 0x6f, 0x8d, 0x21, 0x67, 0xf4, 0x2d, 0x6a, 0xf7, 0x97, 0x77, 0x61, 0x36, 0x31, 0x82, 0xe7, 0x79,
+ 0x60, 0xb0, 0xec, 0xc2, 0x42, 0xfa, 0x43, 0xcf, 0xf5, 0x80, 0xe2, 0x16, 0x4c, 0x4b, 0x09, 0x8c,
+ 0x9e, 0x56, 0x08, 0xc5, 0x3b, 0xe4, 0x2d, 0x72, 0xc0, 0xa9, 0x56, 0x13, 0x0a, 0x3e, 0x37, 0xdb,
+ 0xef, 0xd1, 0x02, 0x81, 0x50, 0xff, 0x5f, 0x1a, 0x97, 0x3a, 0xe2, 0x39, 0x12, 0x03, 0x2a, 0x5d,
+ 0x9e, 0x18, 0x8a, 0x25, 0x22, 0x2d, 0x76, 0x17, 0x8d, 0x85, 0x35, 0xac, 0xc7, 0x68, 0xb0, 0x8a,
+ 0x13, 0xdd, 0x1f, 0x7c, 0x30, 0xe7, 0xfa, 0x68, 0xdb, 0xc3, 0xd0, 0xef, 0xe6, 0xa0, 0xc1, 0x36,
+ 0xea, 0xf3, 0x24, 0xda, 0xf1, 0xcf, 0x93, 0x9c, 0xfc, 0x68, 0x85, 0xfe, 0x1d, 0x0d, 0x32, 0xd3,
+ 0x29, 0x23, 0x1d, 0x26, 0x79, 0x30, 0xb5, 0xfa, 0x92, 0x10, 0x8f, 0xb4, 0xc6, 0x02, 0x82, 0x7c,
+ 0xb8, 0x28, 0x42, 0x95, 0x6f, 0x91, 0x83, 0xf8, 0x0d, 0x1b, 0xc1, 0x30, 0xc3, 0x07, 0x2d, 0xb2,
+ 0x84, 0x31, 0xed, 0x14, 0x26, 0x9c, 0x89, 0x9b, 0x45, 0xcb, 0x32, 0x8d, 0xfb, 0x83, 0x71, 0xa4,
+ 0xcc, 0xba, 0x7a, 0xbe, 0xd1, 0xb2, 0x9c, 0xc4, 0xc9, 0xd1, 0xb2, 0xac, 0xde, 0x07, 0xe4, 0xec,
+ 0x9a, 0xf5, 0x35, 0xc7, 0xd3, 0xf7, 0x3d, 0x0d, 0x16, 0x07, 0xae, 0x9c, 0x0f, 0x75, 0xf2, 0xf9,
+ 0xc8, 0x1c, 0x58, 0xcf, 0xa4, 0x9f, 0x82, 0xa9, 0x64, 0x66, 0xa2, 0x78, 0x13, 0x66, 0x13, 0x8e,
+ 0x3e, 0x79, 0x83, 0x49, 0xcb, 0xbc, 0xc1, 0xa4, 0x5e, 0x50, 0x2a, 0x1d, 0x7b, 0x41, 0xe9, 0x68,
+ 0x46, 0x4c, 0x37, 0x33, 0x9a, 0xde, 0x86, 0x49, 0x76, 0x8d, 0x28, 0x7a, 0x0a, 0xeb, 0xe5, 0xc2,
+ 0xd7, 0x93, 0x02, 0x2e, 0x06, 0xf8, 0xff, 0x58, 0x60, 0x45, 0x0d, 0x58, 0x60, 0x4f, 0x24, 0xb7,
+ 0x7c, 0x6f, 0xdb, 0x76, 0xc8, 0x46, 0x2c, 0x71, 0xe4, 0xa5, 0xe5, 0x7a, 0x0a, 0x8e, 0x07, 0x5a,
+ 0x20, 0xcc, 0xcd, 0x2e, 0xbe, 0x10, 0x5e, 0x2c, 0xe8, 0xf0, 0xe6, 0x69, 0xa1, 0xa4, 0xb9, 0xf5,
+ 0x2e, 0x00, 0x89, 0x26, 0x2e, 0x8a, 0x74, 0x79, 0xb5, 0xd8, 0x75, 0x6c, 0x39, 0xfd, 0x91, 0xc4,
+ 0x90, 0x45, 0x01, 0x56, 0x88, 0x20, 0x1f, 0x2a, 0x3b, 0xf1, 0x4b, 0x39, 0xc2, 0x06, 0x7a, 0x6d,
+ 0xc4, 0x57, 0x7a, 0xf8, 0x06, 0xa5, 0x14, 0x60, 0x95, 0x08, 0xf2, 0x13, 0x4f, 0x53, 0x8f, 0xf0,
+ 0x0e, 0x41, 0xac, 0x3a, 0x9f, 0xf4, 0x2c, 0x35, 0xa5, 0xe9, 0xca, 0x7b, 0x80, 0xa3, 0x18, 0x4d,
+ 0xf1, 0x6d, 0xc2, 0x98, 0x66, 0x5c, 0x86, 0x15, 0x2a, 0x74, 0x6c, 0xbb, 0xf1, 0xdd, 0x4f, 0x61,
+ 0x43, 0xbd, 0x36, 0xe2, 0x1d, 0x5c, 0xb1, 0xf9, 0xc7, 0x05, 0x58, 0x25, 0x82, 0x5c, 0x80, 0xae,
+ 0xbc, 0x12, 0x2a, 0x6c, 0xa9, 0x42, 0xdf, 0x19, 0x5f, 0x2c, 0xe5, 0xde, 0xaa, 0xf8, 0x37, 0x56,
+ 0x28, 0x50, 0x23, 0x51, 0x5a, 0xec, 0x50, 0xdc, 0x8a, 0x1a, 0xca, 0x5a, 0xff, 0x74, 0xac, 0x49,
+ 0x54, 0xd8, 0x7a, 0x7d, 0x52, 0xd1, 0x22, 0x1e, 0x1e, 0x56, 0x67, 0x98, 0x0c, 0x19, 0xd0, 0x2a,
+ 0x62, 0xf7, 0xfc, 0xcc, 0x71, 0xee, 0x79, 0x74, 0x03, 0x16, 0x03, 0xf5, 0xb8, 0x9b, 0x09, 0x86,
+ 0x59, 0xd6, 0x44, 0x5e, 0x21, 0x6e, 0xa7, 0x2b, 0xe0, 0xc1, 0x36, 0x5c, 0xf0, 0x11, 0x8b, 0xb5,
+ 0x9f, 0x53, 0x05, 0x1f, 0x2f, 0xc3, 0x12, 0x8a, 0xf6, 0x52, 0x2f, 0x38, 0xcf, 0x8f, 0x6a, 0x66,
+ 0x0f, 0xf7, 0x9c, 0x33, 0xfa, 0xb2, 0xfa, 0x4a, 0xe7, 0x42, 0xf1, 0xb8, 0xa7, 0xec, 0x6b, 0xc0,
+ 0x27, 0xbc, 0xd4, 0xd9, 0x4f, 0x46, 0xfc, 0x2e, 0x9e, 0x49, 0x88, 0xa5, 0x74, 0xd8, 0xe7, 0x46,
+ 0xfb, 0x7e, 0xa7, 0x0c, 0x15, 0x45, 0xf7, 0x78, 0x2f, 0x6e, 0x77, 0x84, 0x50, 0x31, 0x3d, 0x37,
+ 0x08, 0x7d, 0x43, 0xb9, 0xd1, 0x32, 0x22, 0x4d, 0xf9, 0xe1, 0xf5, 0x18, 0x33, 0x56, 0xc9, 0xd0,
+ 0x25, 0x2a, 0x1d, 0x1d, 0x63, 0x67, 0xe0, 0xe8, 0x90, 0x4b, 0x34, 0xc3, 0xd9, 0xf1, 0x29, 0x80,
+ 0x48, 0xd2, 0xcb, 0x67, 0x10, 0x65, 0x8a, 0xb0, 0x66, 0x70, 0x53, 0xc2, 0xb0, 0x52, 0x0f, 0x3d,
+ 0x80, 0x59, 0x47, 0x4d, 0x0f, 0x24, 0xb6, 0xa1, 0x42, 0x47, 0x91, 0x89, 0x3c, 0x43, 0xd1, 0xa9,
+ 0x98, 0x52, 0x84, 0x93, 0xa4, 0x28, 0x1b, 0x38, 0x51, 0x4a, 0xab, 0x91, 0x9c, 0x76, 0x32, 0x31,
+ 0x56, 0xcc, 0x06, 0xb2, 0x28, 0xc0, 0x0a, 0x91, 0x1c, 0xcf, 0xca, 0x54, 0x21, 0xcf, 0x4a, 0x1f,
+ 0x2e, 0xf8, 0x24, 0xf4, 0x0f, 0xea, 0x07, 0x26, 0x7b, 0xec, 0xcc, 0x0f, 0x59, 0x3e, 0xa9, 0x72,
+ 0xb1, 0x80, 0x71, 0x3c, 0x88, 0x0a, 0x67, 0xe1, 0x4f, 0x88, 0xb8, 0xe9, 0x63, 0x45, 0xdc, 0xa7,
+ 0xa1, 0x12, 0x12, 0x73, 0xc7, 0xb5, 0x4d, 0xc3, 0x69, 0x36, 0xc4, 0xa5, 0xce, 0x78, 0xb5, 0xc6,
+ 0x20, 0xac, 0xd6, 0x43, 0x35, 0x18, 0xeb, 0xdb, 0x96, 0x90, 0xf3, 0x3f, 0x23, 0xdf, 0xbc, 0x6c,
+ 0x36, 0x1e, 0x1e, 0x56, 0x3f, 0x14, 0xbb, 0x2a, 0xe4, 0x57, 0x5d, 0xeb, 0xed, 0x76, 0xae, 0x85,
+ 0x07, 0x3d, 0x12, 0xac, 0x6c, 0x36, 0x1b, 0x98, 0x36, 0xce, 0xf2, 0x3a, 0xcd, 0x0c, 0xef, 0x75,
+ 0xd2, 0x6f, 0x03, 0xc4, 0xc2, 0x64, 0x64, 0x3f, 0xc0, 0x4f, 0x27, 0xe0, 0xd2, 0xa8, 0xe7, 0x6b,
+ 0x2c, 0xf1, 0x12, 0xcb, 0x06, 0xbf, 0xba, 0x1d, 0x12, 0xff, 0xce, 0x9d, 0xf5, 0xbb, 0x3b, 0x3e,
+ 0x09, 0x76, 0x3c, 0xc7, 0x2a, 0x98, 0xf9, 0x29, 0x7e, 0x4c, 0x60, 0x00, 0x23, 0xce, 0xa1, 0x84,
+ 0xea, 0xb0, 0x18, 0x65, 0xa4, 0xc7, 0x46, 0x48, 0x6a, 0x7d, 0x3f, 0x08, 0x45, 0x58, 0x20, 0x73,
+ 0x68, 0xae, 0xa5, 0x81, 0x78, 0xb0, 0x7e, 0x1a, 0x09, 0xcf, 0x4c, 0x3f, 0xce, 0x1e, 0xac, 0x1b,
+ 0x40, 0xc2, 0xd3, 0xd3, 0x0f, 0xd6, 0x57, 0x91, 0xc8, 0x07, 0xee, 0x98, 0xec, 0x48, 0x21, 0x89,
+ 0x5f, 0xbf, 0x1b, 0xac, 0x8f, 0x2c, 0x78, 0xca, 0x27, 0xa6, 0xd7, 0xed, 0x12, 0xd7, 0xe2, 0x29,
+ 0xfb, 0x0c, 0xbf, 0x63, 0xbb, 0xd7, 0x7d, 0x83, 0x55, 0x14, 0xaf, 0xe8, 0x5d, 0x3d, 0x3a, 0xac,
+ 0x3e, 0x85, 0x8f, 0xa9, 0x87, 0x8f, 0xc5, 0x82, 0xba, 0x30, 0xdf, 0x67, 0x99, 0x4a, 0xfc, 0xa6,
+ 0x1b, 0x12, 0x7f, 0xcf, 0x70, 0x0a, 0xbe, 0xb7, 0xc0, 0x78, 0x77, 0x33, 0x89, 0x0a, 0xa7, 0x71,
+ 0xa3, 0x03, 0x2a, 0x16, 0x44, 0x77, 0x14, 0x92, 0xe5, 0xe2, 0xa9, 0xc9, 0xf0, 0x20, 0x3a, 0x9c,
+ 0x45, 0x43, 0xff, 0xfb, 0x1a, 0x88, 0x93, 0x04, 0x6a, 0x1f, 0x2a, 0x46, 0x6e, 0xf9, 0xbd, 0x7f,
+ 0x77, 0xf7, 0x3e, 0x08, 0x04, 0x2c, 0x1f, 0xd4, 0x50, 0xd9, 0x84, 0x4e, 0x7e, 0x48, 0x35, 0xce,
+ 0x67, 0x34, 0x96, 0x9b, 0xcf, 0xe8, 0xbb, 0x1a, 0xa4, 0xdf, 0xdd, 0xa2, 0x56, 0xb7, 0xb8, 0x74,
+ 0x22, 0x42, 0xd7, 0x79, 0x6c, 0x15, 0x2f, 0xc2, 0x11, 0x2c, 0xa9, 0xab, 0x8d, 0x10, 0xa3, 0x9e,
+ 0x1d, 0x2f, 0x7c, 0xbc, 0xae, 0xa6, 0xff, 0x70, 0x0e, 0x26, 0xf9, 0x35, 0x09, 0x2a, 0x7b, 0x32,
+ 0xc2, 0x31, 0x6e, 0x15, 0xbf, 0x88, 0x51, 0x20, 0xea, 0x22, 0x91, 0xb6, 0xa2, 0x74, 0x6c, 0xda,
+ 0x0a, 0xcc, 0x93, 0x8a, 0x8d, 0x60, 0x9f, 0xd7, 0x71, 0x53, 0xe4, 0xb1, 0x16, 0x09, 0xc5, 0x50,
+ 0x98, 0x30, 0x5c, 0xc7, 0x8b, 0xa7, 0x4b, 0xe3, 0x03, 0xa0, 0x98, 0xaf, 0x73, 0xc7, 0x98, 0xae,
+ 0x71, 0x5c, 0xfa, 0x44, 0x71, 0x67, 0xae, 0x18, 0xf2, 0x21, 0xe2, 0xd2, 0x25, 0xc7, 0x4f, 0xe6,
+ 0x72, 0xfc, 0x36, 0x4c, 0x09, 0xb9, 0x21, 0x84, 0xd8, 0x2b, 0x23, 0x24, 0x0c, 0x53, 0x2e, 0x12,
+ 0xf2, 0x02, 0x1c, 0x21, 0xa7, 0x3b, 0x63, 0xd7, 0xd8, 0xb7, 0xbb, 0xfd, 0x2e, 0x93, 0x5c, 0x13,
+ 0x6a, 0x55, 0x56, 0x8c, 0x23, 0x38, 0xab, 0xca, 0x7d, 0xe0, 0x4c, 0x1f, 0x51, 0xab, 0x8a, 0x77,
+ 0x18, 0x22, 0x38, 0x7a, 0x0b, 0xca, 0x5d, 0x63, 0xbf, 0xdd, 0xf7, 0x3b, 0x44, 0x98, 0xac, 0xf9,
+ 0x8e, 0xd8, 0x7e, 0x68, 0x3b, 0x2b, 0x54, 0x8b, 0x0e, 0xfd, 0x95, 0xa6, 0x1b, 0xde, 0xf1, 0xdb,
+ 0xa1, 0x2f, 0x73, 0x23, 0xad, 0x0b, 0x2c, 0x58, 0xe2, 0x43, 0x0e, 0xcc, 0x75, 0x8d, 0xfd, 0x4d,
+ 0xd7, 0x90, 0xe9, 0xe2, 0x2b, 0x05, 0x29, 0x30, 0xbf, 0xdd, 0x7a, 0x02, 0x17, 0x4e, 0xe1, 0xce,
+ 0x70, 0x11, 0xce, 0x9c, 0x97, 0x8b, 0x70, 0x55, 0x9e, 0x6b, 0xcf, 0x32, 0x26, 0x7c, 0x22, 0xcb,
+ 0x5b, 0x7e, 0xfc, 0x99, 0xf5, 0xdb, 0xf2, 0xcc, 0x7a, 0xae, 0xb8, 0x5f, 0xef, 0x98, 0xf3, 0xea,
+ 0x3e, 0x54, 0x2c, 0xf9, 0xee, 0x7b, 0xb0, 0x34, 0x5f, 0xdc, 0xae, 0x8c, 0x9f, 0x8f, 0x57, 0x92,
+ 0xb9, 0xc6, 0xa8, 0xb1, 0x4a, 0x07, 0xdd, 0xe1, 0xe9, 0xc4, 0x1d, 0x12, 0xc6, 0x55, 0x98, 0x5e,
+ 0xbc, 0xc0, 0x5d, 0x07, 0x51, 0xf6, 0xef, 0x81, 0x0a, 0x38, 0xbb, 0x1d, 0x55, 0x25, 0x79, 0x30,
+ 0xed, 0x62, 0x7c, 0x7f, 0x39, 0x71, 0x51, 0xe5, 0x97, 0x34, 0x58, 0xe0, 0xcf, 0xbc, 0xd4, 0xbd,
+ 0x6e, 0xcf, 0x73, 0x09, 0x9d, 0x16, 0xc4, 0xc6, 0xf4, 0xf3, 0xc5, 0x65, 0x43, 0x3b, 0x85, 0x51,
+ 0x1c, 0x73, 0xa4, 0x4a, 0xf1, 0x00, 0x65, 0xf4, 0x7b, 0x1a, 0x2c, 0x75, 0x73, 0x72, 0xa5, 0x2e,
+ 0x5d, 0x28, 0x1e, 0x9c, 0x73, 0x52, 0xfe, 0x55, 0xfe, 0xf4, 0xd4, 0x49, 0xb5, 0x70, 0x6e, 0xdf,
+ 0x46, 0x0d, 0xe1, 0x1b, 0xe5, 0x02, 0xcf, 0xcf, 0xc3, 0x42, 0x7a, 0x0f, 0x50, 0x13, 0xa0, 0x6b,
+ 0xe7, 0x9a, 0x00, 0x5d, 0x7f, 0x15, 0x2e, 0x67, 0xcf, 0x39, 0x55, 0x89, 0xd8, 0x4b, 0x02, 0xc2,
+ 0x12, 0x89, 0xd3, 0x7b, 0xd1, 0x42, 0xcc, 0x61, 0xb5, 0x4f, 0x7c, 0xff, 0x27, 0x57, 0x1e, 0xfb,
+ 0xc1, 0x4f, 0xae, 0x3c, 0xf6, 0xa3, 0x9f, 0x5c, 0x79, 0xec, 0xab, 0x47, 0x57, 0xb4, 0xef, 0x1f,
+ 0x5d, 0xd1, 0x7e, 0x70, 0x74, 0x45, 0xfb, 0xd1, 0xd1, 0x15, 0xed, 0x7f, 0x1c, 0x5d, 0xd1, 0x7e,
+ 0xe5, 0x7f, 0x5e, 0x79, 0xec, 0xad, 0x29, 0xd1, 0xa3, 0x3f, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x29,
+ 0xa3, 0x30, 0xff, 0x7f, 0xaa, 0x00, 0x00,
+}
+
+func (m *Addon) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Addon) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Addon) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *Addons) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Addons) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Addons) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NginxIngress != nil {
+ {
+ size, err := m.NginxIngress.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.KubernetesDashboard != nil {
+ {
+ size, err := m.KubernetesDashboard.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AdmissionPlugin) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AdmissionPlugin) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AdmissionPlugin) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Config != nil {
+ {
+ size, err := m.Config.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Alerting) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Alerting) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Alerting) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.EmailReceivers) > 0 {
+ for iNdEx := len(m.EmailReceivers) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.EmailReceivers[iNdEx])
+ copy(dAtA[i:], m.EmailReceivers[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmailReceivers[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuditConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuditConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuditConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.AuditPolicy != nil {
+ {
+ size, err := m.AuditPolicy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuditPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuditPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuditPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ConfigMapRef != nil {
+ {
+ size, err := m.ConfigMapRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AvailabilityZone) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AvailabilityZone) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AvailabilityZone) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.UnavailableVolumeTypes) > 0 {
+ for iNdEx := len(m.UnavailableVolumeTypes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.UnavailableVolumeTypes[iNdEx])
+ copy(dAtA[i:], m.UnavailableVolumeTypes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnavailableVolumeTypes[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.UnavailableMachineTypes) > 0 {
+ for iNdEx := len(m.UnavailableMachineTypes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.UnavailableMachineTypes[iNdEx])
+ copy(dAtA[i:], m.UnavailableMachineTypes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnavailableMachineTypes[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupBucket) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupBucket) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupBucket) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupBucketList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupBucketList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupBucketList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupBucketProvider) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupBucketProvider) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupBucketProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Region)
+ copy(dAtA[i:], m.Region)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupBucketSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupBucketSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupBucketSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.SeedName != nil {
+ i -= len(*m.SeedName)
+ copy(dAtA[i:], *m.SeedName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SeedName)))
+ i--
+ dAtA[i] = 0x22
+ }
+ {
+ size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupBucketStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupBucketStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupBucketStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.GeneratedSecretRef != nil {
+ {
+ size, err := m.GeneratedSecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x20
+ if m.LastError != nil {
+ {
+ size, err := m.LastError.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.LastOperation != nil {
+ {
+ size, err := m.LastOperation.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ProviderStatus != nil {
+ {
+ size, err := m.ProviderStatus.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupEntry) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupEntry) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupEntryList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupEntryList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupEntryList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupEntrySpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupEntrySpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupEntrySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.SeedName != nil {
+ i -= len(*m.SeedName)
+ copy(dAtA[i:], *m.SeedName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SeedName)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.BucketName)
+ copy(dAtA[i:], m.BucketName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.BucketName)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BackupEntryStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BackupEntryStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BackupEntryStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x18
+ if m.LastError != nil {
+ {
+ size, err := m.LastError.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.LastOperation != nil {
+ {
+ size, err := m.LastOperation.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CRI) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CRI) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CRI) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ContainerRuntimes) > 0 {
+ for iNdEx := len(m.ContainerRuntimes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ContainerRuntimes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CloudInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CloudInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloudInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Region)
+ copy(dAtA[i:], m.Region)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CloudProfile) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CloudProfile) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloudProfile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CloudProfileList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CloudProfileList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloudProfileList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CloudProfileSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CloudProfileSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloudProfileSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.VolumeTypes) > 0 {
+ for iNdEx := len(m.VolumeTypes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.VolumeTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0x42
+ if m.SeedSelector != nil {
+ {
+ size, err := m.SeedSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.Regions) > 0 {
+ for iNdEx := len(m.Regions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Regions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.MachineTypes) > 0 {
+ for iNdEx := len(m.MachineTypes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.MachineTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.MachineImages) > 0 {
+ for iNdEx := len(m.MachineImages) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.MachineImages[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ {
+ size, err := m.Kubernetes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if m.CABundle != nil {
+ i -= len(*m.CABundle)
+ copy(dAtA[i:], *m.CABundle)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CABundle)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterAutoscaler) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterAutoscaler) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ScanInterval != nil {
+ {
+ size, err := m.ScanInterval.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.ScaleDownUtilizationThreshold != nil {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.ScaleDownUtilizationThreshold))))
+ i--
+ dAtA[i] = 0x29
+ }
+ if m.ScaleDownUnneededTime != nil {
+ {
+ size, err := m.ScaleDownUnneededTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ScaleDownDelayAfterFailure != nil {
+ {
+ size, err := m.ScaleDownDelayAfterFailure.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ScaleDownDelayAfterDelete != nil {
+ {
+ size, err := m.ScaleDownDelayAfterDelete.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ScaleDownDelayAfterAdd != nil {
+ {
+ size, err := m.ScaleDownDelayAfterAdd.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Kubernetes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Cloud.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Condition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Condition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Condition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Codes) > 0 {
+ for iNdEx := len(m.Codes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Codes[iNdEx])
+ copy(dAtA[i:], m.Codes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Codes[iNdEx])))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x2a
+ {
+ size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ContainerRuntime) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ContainerRuntime) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerRuntime) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerDeployment) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerDeployment) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.SeedSelector != nil {
+ {
+ size, err := m.SeedSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Policy != nil {
+ i -= len(*m.Policy)
+ copy(dAtA[i:], *m.Policy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Policy)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerInstallation) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerInstallation) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerInstallation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerInstallationList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerInstallationList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerInstallationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerInstallationSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerInstallationSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerInstallationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.SeedRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.RegistrationRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerInstallationStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerInstallationStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerInstallationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ProviderStatus != nil {
+ {
+ size, err := m.ProviderStatus.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerRegistration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerRegistration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerRegistration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerRegistrationList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerRegistrationList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerRegistrationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerRegistrationSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerRegistrationSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerRegistrationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Deployment != nil {
+ {
+ size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Resources) > 0 {
+ for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ControllerResource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ControllerResource) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ControllerResource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Primary != nil {
+ i--
+ if *m.Primary {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.ReconcileTimeout != nil {
+ {
+ size, err := m.ReconcileTimeout.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.GloballyEnabled != nil {
+ i--
+ if *m.GloballyEnabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DNS) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DNS) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DNS) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Providers) > 0 {
+ for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Domain != nil {
+ i -= len(*m.Domain)
+ copy(dAtA[i:], *m.Domain)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Domain)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DNSIncludeExclude) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DNSIncludeExclude) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DNSIncludeExclude) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Exclude) > 0 {
+ for iNdEx := len(m.Exclude) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Exclude[iNdEx])
+ copy(dAtA[i:], m.Exclude[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Exclude[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Include) > 0 {
+ for iNdEx := len(m.Include) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Include[iNdEx])
+ copy(dAtA[i:], m.Include[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Include[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DNSProvider) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DNSProvider) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DNSProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Zones != nil {
+ {
+ size, err := m.Zones.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Type != nil {
+ i -= len(*m.Type)
+ copy(dAtA[i:], *m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.SecretName != nil {
+ i -= len(*m.SecretName)
+ copy(dAtA[i:], *m.SecretName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretName)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Primary != nil {
+ i--
+ if *m.Primary {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Domains != nil {
+ {
+ size, err := m.Domains.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DataVolume) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DataVolume) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DataVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Encrypted != nil {
+ i--
+ if *m.Encrypted {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ i -= len(m.VolumeSize)
+ copy(dAtA[i:], m.VolumeSize)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeSize)))
+ i--
+ dAtA[i] = 0x1a
+ if m.Type != nil {
+ i -= len(*m.Type)
+ copy(dAtA[i:], *m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Endpoint) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Purpose)
+ copy(dAtA[i:], m.Purpose)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Purpose)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.URL)
+ copy(dAtA[i:], m.URL)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ExpirableVersion) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ExpirableVersion) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExpirableVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Classification != nil {
+ i -= len(*m.Classification)
+ copy(dAtA[i:], *m.Classification)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Classification)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ExpirationDate != nil {
+ {
+ size, err := m.ExpirationDate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Extension) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Extension) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Extension) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Disabled != nil {
+ i--
+ if *m.Disabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Gardener) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Gardener) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Gardener) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
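+ // Repeated fields such as Schedules are iterated from the last element to the
+ // first; because the buffer is filled backwards, the entries end up in their
+ // original order on the wire, each one length-prefixed and tagged
+ // (0x12 = field 2, length-delimited).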
+func (m *Hibernation) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Hibernation) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Hibernation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Schedules) > 0 {
+ for iNdEx := len(m.Schedules) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Schedules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Enabled != nil {
+ i--
+ if *m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HibernationSchedule) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HibernationSchedule) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HibernationSchedule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Location != nil {
+ i -= len(*m.Location)
+ copy(dAtA[i:], *m.Location)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Location)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.End != nil {
+ i -= len(*m.End)
+ copy(dAtA[i:], *m.End)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.End)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Start != nil {
+ i -= len(*m.Start)
+ copy(dAtA[i:], *m.Start)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Start)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
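+ // The byte written after each field is its protobuf tag, (field number << 3) |
+ // wire type: 0xa tags field 1 as length-delimited, 0x12 field 2, and so on. The
+ // one fixed64 field here, Tolerance, is tagged 0x31 (field 6, wire type 1) and
+ // written as the little-endian IEEE-754 bits of the float64.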
+func (m *HorizontalPodAutoscalerConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HorizontalPodAutoscalerConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HorizontalPodAutoscalerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.UpscaleDelay != nil {
+ {
+ size, err := m.UpscaleDelay.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.Tolerance != nil {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Tolerance))))
+ i--
+ dAtA[i] = 0x31
+ }
+ if m.SyncPeriod != nil {
+ {
+ size, err := m.SyncPeriod.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.InitialReadinessDelay != nil {
+ {
+ size, err := m.InitialReadinessDelay.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.DownscaleStabilization != nil {
+ {
+ size, err := m.DownscaleStabilization.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.DownscaleDelay != nil {
+ {
+ size, err := m.DownscaleDelay.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.CPUInitializationPeriod != nil {
+ {
+ size, err := m.CPUInitializationPeriod.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Ingress) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Ingress) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Controller.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Domain)
+ copy(dAtA[i:], m.Domain)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Domain)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressController) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressController) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressController) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
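+ // Map fields (here RuntimeConfig) are encoded as repeated key/value entries: the
+ // keys are sorted via gogo/protobuf's sortkeys so the output is deterministic, and
+ // each entry is a nested message with the key as field 1 and the value as field 2,
+ // prefixed by its byte length (baseI-i) and the outer field tag (0x3a = field 7).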
+func (m *KubeAPIServerConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeAPIServerConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeAPIServerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Requests != nil {
+ {
+ size, err := m.Requests.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ if m.WatchCacheSizes != nil {
+ {
+ size, err := m.WatchCacheSizes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.ServiceAccountConfig != nil {
+ {
+ size, err := m.ServiceAccountConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.RuntimeConfig) > 0 {
+ keysForRuntimeConfig := make([]string, 0, len(m.RuntimeConfig))
+ for k := range m.RuntimeConfig {
+ keysForRuntimeConfig = append(keysForRuntimeConfig, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForRuntimeConfig)
+ for iNdEx := len(keysForRuntimeConfig) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.RuntimeConfig[string(keysForRuntimeConfig[iNdEx])]
+ baseI := i
+ i--
+ if v {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i -= len(keysForRuntimeConfig[iNdEx])
+ copy(dAtA[i:], keysForRuntimeConfig[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRuntimeConfig[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if m.OIDCConfig != nil {
+ {
+ size, err := m.OIDCConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.EnableBasicAuthentication != nil {
+ i--
+ if *m.EnableBasicAuthentication {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.AuditConfig != nil {
+ {
+ size, err := m.AuditConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.APIAudiences) > 0 {
+ for iNdEx := len(m.APIAudiences) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.APIAudiences[iNdEx])
+ copy(dAtA[i:], m.APIAudiences[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIAudiences[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.AdmissionPlugins) > 0 {
+ for iNdEx := len(m.AdmissionPlugins) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.AdmissionPlugins[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeAPIServerRequests) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeAPIServerRequests) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeAPIServerRequests) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MaxMutatingInflight != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxMutatingInflight))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.MaxNonMutatingInflight != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxNonMutatingInflight))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeControllerManagerConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeControllerManagerConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeControllerManagerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.PodEvictionTimeout != nil {
+ {
+ size, err := m.PodEvictionTimeout.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.NodeCIDRMaskSize != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.NodeCIDRMaskSize))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.HorizontalPodAutoscalerConfig != nil {
+ {
+ size, err := m.HorizontalPodAutoscalerConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeProxyConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeProxyConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeProxyConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Mode != nil {
+ i -= len(*m.Mode)
+ copy(dAtA[i:], *m.Mode)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Mode)))
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeSchedulerConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeSchedulerConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeSchedulerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.KubeMaxPDVols != nil {
+ i -= len(*m.KubeMaxPDVols)
+ copy(dAtA[i:], *m.KubeMaxPDVols)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.KubeMaxPDVols)))
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
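+ // Optional fields are pointers and are skipped entirely when nil, so unset values
+ // take no space on the wire. Set booleans such as FailSwapOn are written as a
+ // single 0/1 byte following their varint tag.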
+func (m *KubeletConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeletConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeletConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.SystemReserved != nil {
+ {
+ size, err := m.SystemReserved.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x7a
+ }
+ if m.KubeReserved != nil {
+ {
+ size, err := m.KubeReserved.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x72
+ }
+ if m.FailSwapOn != nil {
+ i--
+ if *m.FailSwapOn {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x68
+ }
+ if m.ImagePullProgressDeadline != nil {
+ {
+ size, err := m.ImagePullProgressDeadline.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x62
+ }
+ if m.PodPIDsLimit != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.PodPIDsLimit))
+ i--
+ dAtA[i] = 0x58
+ }
+ if m.MaxPods != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxPods))
+ i--
+ dAtA[i] = 0x50
+ }
+ if m.EvictionSoftGracePeriod != nil {
+ {
+ size, err := m.EvictionSoftGracePeriod.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.EvictionSoft != nil {
+ {
+ size, err := m.EvictionSoft.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.EvictionPressureTransitionPeriod != nil {
+ {
+ size, err := m.EvictionPressureTransitionPeriod.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.EvictionMinimumReclaim != nil {
+ {
+ size, err := m.EvictionMinimumReclaim.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.EvictionMaxPodGracePeriod != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.EvictionMaxPodGracePeriod))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.EvictionHard != nil {
+ {
+ size, err := m.EvictionHard.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.CPUManagerPolicy != nil {
+ i -= len(*m.CPUManagerPolicy)
+ copy(dAtA[i:], *m.CPUManagerPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CPUManagerPolicy)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.CPUCFSQuota != nil {
+ i--
+ if *m.CPUCFSQuota {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ {
+ size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeletConfigEviction) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeletConfigEviction) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeletConfigEviction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NodeFSInodesFree != nil {
+ i -= len(*m.NodeFSInodesFree)
+ copy(dAtA[i:], *m.NodeFSInodesFree)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeFSInodesFree)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.NodeFSAvailable != nil {
+ i -= len(*m.NodeFSAvailable)
+ copy(dAtA[i:], *m.NodeFSAvailable)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeFSAvailable)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ImageFSInodesFree != nil {
+ i -= len(*m.ImageFSInodesFree)
+ copy(dAtA[i:], *m.ImageFSInodesFree)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ImageFSInodesFree)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ImageFSAvailable != nil {
+ i -= len(*m.ImageFSAvailable)
+ copy(dAtA[i:], *m.ImageFSAvailable)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ImageFSAvailable)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MemoryAvailable != nil {
+ i -= len(*m.MemoryAvailable)
+ copy(dAtA[i:], *m.MemoryAvailable)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MemoryAvailable)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeletConfigEvictionMinimumReclaim) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeletConfigEvictionMinimumReclaim) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeletConfigEvictionMinimumReclaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NodeFSInodesFree != nil {
+ {
+ size, err := m.NodeFSInodesFree.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.NodeFSAvailable != nil {
+ {
+ size, err := m.NodeFSAvailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ImageFSInodesFree != nil {
+ {
+ size, err := m.ImageFSInodesFree.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ImageFSAvailable != nil {
+ {
+ size, err := m.ImageFSAvailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MemoryAvailable != nil {
+ {
+ size, err := m.MemoryAvailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeletConfigEvictionSoftGracePeriod) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeletConfigEvictionSoftGracePeriod) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeletConfigEvictionSoftGracePeriod) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NodeFSInodesFree != nil {
+ {
+ size, err := m.NodeFSInodesFree.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.NodeFSAvailable != nil {
+ {
+ size, err := m.NodeFSAvailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ImageFSInodesFree != nil {
+ {
+ size, err := m.ImageFSInodesFree.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ImageFSAvailable != nil {
+ {
+ size, err := m.ImageFSAvailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MemoryAvailable != nil {
+ {
+ size, err := m.MemoryAvailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubeletConfigReserved) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubeletConfigReserved) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubeletConfigReserved) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.PID != nil {
+ {
+ size, err := m.PID.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.EphemeralStorage != nil {
+ {
+ size, err := m.EphemeralStorage.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Memory != nil {
+ {
+ size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.CPU != nil {
+ {
+ size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Kubernetes) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Kubernetes) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Kubernetes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.VerticalPodAutoscaler != nil {
+ {
+ size, err := m.VerticalPodAutoscaler.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x42
+ if m.Kubelet != nil {
+ {
+ size, err := m.Kubelet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.KubeProxy != nil {
+ {
+ size, err := m.KubeProxy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.KubeScheduler != nil {
+ {
+ size, err := m.KubeScheduler.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.KubeControllerManager != nil {
+ {
+ size, err := m.KubeControllerManager.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.KubeAPIServer != nil {
+ {
+ size, err := m.KubeAPIServer.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ClusterAutoscaler != nil {
+ {
+ size, err := m.ClusterAutoscaler.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.AllowPrivilegedContainers != nil {
+ i--
+ if *m.AllowPrivilegedContainers {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubernetesConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubernetesConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubernetesConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.FeatureGates) > 0 {
+ keysForFeatureGates := make([]string, 0, len(m.FeatureGates))
+ for k := range m.FeatureGates {
+ keysForFeatureGates = append(keysForFeatureGates, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForFeatureGates)
+ for iNdEx := len(keysForFeatureGates) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.FeatureGates[string(keysForFeatureGates[iNdEx])]
+ baseI := i
+ i--
+ if v {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i -= len(keysForFeatureGates[iNdEx])
+ copy(dAtA[i:], keysForFeatureGates[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForFeatureGates[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubernetesDashboard) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubernetesDashboard) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubernetesDashboard) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Addon.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if m.AuthenticationMode != nil {
+ i -= len(*m.AuthenticationMode)
+ copy(dAtA[i:], *m.AuthenticationMode)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AuthenticationMode)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *KubernetesInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubernetesInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubernetesInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KubernetesSettings) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KubernetesSettings) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KubernetesSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Versions) > 0 {
+ for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LastError) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LastError) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LastError) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.LastUpdateTime != nil {
+ {
+ size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Codes) > 0 {
+ for iNdEx := len(m.Codes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Codes[iNdEx])
+ copy(dAtA[i:], m.Codes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Codes[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.TaskID != nil {
+ i -= len(*m.TaskID)
+ copy(dAtA[i:], *m.TaskID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TaskID)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
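+ // Non-pointer fields are written unconditionally: LastOperation always emits its
+ // Description, LastUpdateTime, Progress, State, and Type, even when they hold
+ // their zero values.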
+func (m *LastOperation) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LastOperation) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LastOperation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.State)
+ copy(dAtA[i:], m.State)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.State)))
+ i--
+ dAtA[i] = 0x22
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Progress))
+ i--
+ dAtA[i] = 0x18
+ {
+ size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Machine) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Machine) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Machine) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Image != nil {
+ {
+ size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MachineControllerManagerSettings) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MachineControllerManagerSettings) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MachineControllerManagerSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.NodeConditions) > 0 {
+ for iNdEx := len(m.NodeConditions) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.NodeConditions[iNdEx])
+ copy(dAtA[i:], m.NodeConditions[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeConditions[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.MaxEvictRetries != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxEvictRetries))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.MachineCreationTimeout != nil {
+ {
+ size, err := m.MachineCreationTimeout.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.MachineHealthTimeout != nil {
+ {
+ size, err := m.MachineHealthTimeout.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MachineDrainTimeout != nil {
+ {
+ size, err := m.MachineDrainTimeout.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MachineImage) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MachineImage) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MachineImage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Versions) > 0 {
+ for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MachineImageVersion) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MachineImageVersion) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MachineImageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.CRI) > 0 {
+ for iNdEx := len(m.CRI) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.CRI[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ExpirableVersion.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MachineType) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MachineType) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MachineType) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Usable != nil {
+ i--
+ if *m.Usable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.Storage != nil {
+ {
+ size, err := m.Storage.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.GPU.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MachineTypeStorage) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MachineTypeStorage) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MachineTypeStorage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.StorageSize.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Class)
+ copy(dAtA[i:], m.Class)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Class)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Maintenance) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Maintenance) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Maintenance) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ConfineSpecUpdateRollout != nil {
+ i--
+ if *m.ConfineSpecUpdateRollout {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.TimeWindow != nil {
+ {
+ size, err := m.TimeWindow.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.AutoUpdate != nil {
+ {
+ size, err := m.AutoUpdate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MaintenanceAutoUpdate) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MaintenanceAutoUpdate) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MaintenanceAutoUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.MachineImageVersion {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i--
+ if m.KubernetesVersion {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *MaintenanceTimeWindow) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MaintenanceTimeWindow) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MaintenanceTimeWindow) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.End)
+ copy(dAtA[i:], m.End)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.End)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Begin)
+ copy(dAtA[i:], m.Begin)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Begin)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Monitoring) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Monitoring) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Monitoring) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Alerting != nil {
+ {
+ size, err := m.Alerting.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NamedResourceReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NamedResourceReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedResourceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ResourceRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Networking) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Networking) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Networking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Services != nil {
+ i -= len(*m.Services)
+ copy(dAtA[i:], *m.Services)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Services)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Nodes != nil {
+ i -= len(*m.Nodes)
+ copy(dAtA[i:], *m.Nodes)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Nodes)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Pods != nil {
+ i -= len(*m.Pods)
+ copy(dAtA[i:], *m.Pods)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Pods)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *NginxIngress) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NginxIngress) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NginxIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ExternalTrafficPolicy != nil {
+ i -= len(*m.ExternalTrafficPolicy)
+ copy(dAtA[i:], *m.ExternalTrafficPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ExternalTrafficPolicy)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Config) > 0 {
+ keysForConfig := make([]string, 0, len(m.Config))
+ for k := range m.Config {
+ keysForConfig = append(keysForConfig, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForConfig)
+ for iNdEx := len(keysForConfig) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Config[string(keysForConfig[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForConfig[iNdEx])
+ copy(dAtA[i:], keysForConfig[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForConfig[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.LoadBalancerSourceRanges) > 0 {
+ for iNdEx := len(m.LoadBalancerSourceRanges) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.LoadBalancerSourceRanges[iNdEx])
+ copy(dAtA[i:], m.LoadBalancerSourceRanges[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerSourceRanges[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.Addon.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *OIDCConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OIDCConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OIDCConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.UsernamePrefix != nil {
+ i -= len(*m.UsernamePrefix)
+ copy(dAtA[i:], *m.UsernamePrefix)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UsernamePrefix)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if m.UsernameClaim != nil {
+ i -= len(*m.UsernameClaim)
+ copy(dAtA[i:], *m.UsernameClaim)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UsernameClaim)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.SigningAlgs) > 0 {
+ for iNdEx := len(m.SigningAlgs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.SigningAlgs[iNdEx])
+ copy(dAtA[i:], m.SigningAlgs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SigningAlgs[iNdEx])))
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if len(m.RequiredClaims) > 0 {
+ keysForRequiredClaims := make([]string, 0, len(m.RequiredClaims))
+ for k := range m.RequiredClaims {
+ keysForRequiredClaims = append(keysForRequiredClaims, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForRequiredClaims)
+ for iNdEx := len(keysForRequiredClaims) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.RequiredClaims[string(keysForRequiredClaims[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForRequiredClaims[iNdEx])
+ copy(dAtA[i:], keysForRequiredClaims[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequiredClaims[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if m.IssuerURL != nil {
+ i -= len(*m.IssuerURL)
+ copy(dAtA[i:], *m.IssuerURL)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IssuerURL)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.GroupsPrefix != nil {
+ i -= len(*m.GroupsPrefix)
+ copy(dAtA[i:], *m.GroupsPrefix)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GroupsPrefix)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.GroupsClaim != nil {
+ i -= len(*m.GroupsClaim)
+ copy(dAtA[i:], *m.GroupsClaim)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GroupsClaim)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ClientID != nil {
+ i -= len(*m.ClientID)
+ copy(dAtA[i:], *m.ClientID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClientID)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ClientAuthentication != nil {
+ {
+ size, err := m.ClientAuthentication.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.CABundle != nil {
+ i -= len(*m.CABundle)
+ copy(dAtA[i:], *m.CABundle)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CABundle)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *OpenIDConnectClientAuthentication) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OpenIDConnectClientAuthentication) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OpenIDConnectClientAuthentication) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Secret != nil {
+ i -= len(*m.Secret)
+ copy(dAtA[i:], *m.Secret)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Secret)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ExtraConfig) > 0 {
+ keysForExtraConfig := make([]string, 0, len(m.ExtraConfig))
+ for k := range m.ExtraConfig {
+ keysForExtraConfig = append(keysForExtraConfig, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForExtraConfig)
+ for iNdEx := len(keysForExtraConfig) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.ExtraConfig[string(keysForExtraConfig[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForExtraConfig[iNdEx])
+ copy(dAtA[i:], keysForExtraConfig[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtraConfig[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
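+ // Top-level API objects marshal their ObjectMeta as field 1, Spec as field 2, and
+ // Status as field 3; the corresponding List types marshal ListMeta as field 1 and
+ // Items as field 2.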
+func (m *Plant) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Plant) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Plant) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *PlantList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PlantList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PlantList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *PlantSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PlantSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PlantSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Endpoints) > 0 {
+ for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *PlantStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PlantStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PlantStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ClusterInfo != nil {
+ {
+ size, err := m.ClusterInfo.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ObservedGeneration != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Project) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Project) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectMember) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectMember) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectMember) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Roles) > 0 {
+ for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Roles[iNdEx])
+ copy(dAtA[i:], m.Roles[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Roles[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.Role)
+ copy(dAtA[i:], m.Role)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Subject.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Tolerations != nil {
+ {
+ size, err := m.Tolerations.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.Namespace != nil {
+ i -= len(*m.Namespace)
+ copy(dAtA[i:], *m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Namespace)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.Members) > 0 {
+ for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.Purpose != nil {
+ i -= len(*m.Purpose)
+ copy(dAtA[i:], *m.Purpose)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Purpose)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Owner != nil {
+ {
+ size, err := m.Owner.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Description != nil {
+ i -= len(*m.Description)
+ copy(dAtA[i:], *m.Description)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Description)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.CreatedBy != nil {
+ {
+ size, err := m.CreatedBy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.StaleAutoDeleteTimestamp != nil {
+ {
+ size, err := m.StaleAutoDeleteTimestamp.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.StaleSinceTimestamp != nil {
+ {
+ size, err := m.StaleSinceTimestamp.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ i -= len(m.Phase)
+ copy(dAtA[i:], m.Phase)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
+ i--
+ dAtA[i] = 0x12
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectTolerations) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectTolerations) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectTolerations) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Whitelist) > 0 {
+ for iNdEx := len(m.Whitelist) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Whitelist[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Defaults) > 0 {
+ for iNdEx := len(m.Defaults) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Defaults[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Provider) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Provider) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Workers) > 0 {
+ for iNdEx := len(m.Workers) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Workers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.InfrastructureConfig != nil {
+ {
+ size, err := m.InfrastructureConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ControlPlaneConfig != nil {
+ {
+ size, err := m.ControlPlaneConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Quota) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Quota) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Quota) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *QuotaList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QuotaList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *QuotaSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QuotaSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Metrics) > 0 {
+ keysForMetrics := make([]string, 0, len(m.Metrics))
+ for k := range m.Metrics {
+ keysForMetrics = append(keysForMetrics, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForMetrics)
+ for iNdEx := len(keysForMetrics) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Metrics[k8s_io_api_core_v1.ResourceName(keysForMetrics[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForMetrics[iNdEx])
+ copy(dAtA[i:], keysForMetrics[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetrics[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.ClusterLifetimeDays != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.ClusterLifetimeDays))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Region) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Region) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Region) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+ keysForLabels := make([]string, 0, len(m.Labels))
+ for k := range m.Labels {
+ keysForLabels = append(keysForLabels, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Labels[string(keysForLabels[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForLabels[iNdEx])
+ copy(dAtA[i:], keysForLabels[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Zones) > 0 {
+ for iNdEx := len(m.Zones) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Zones[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceWatchCacheSize) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceWatchCacheSize) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceWatchCacheSize) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.CacheSize))
+ i--
+ dAtA[i] = 0x18
+ i -= len(m.Resource)
+ copy(dAtA[i:], m.Resource)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+ i--
+ dAtA[i] = 0x12
+ if m.APIGroup != nil {
+ i -= len(*m.APIGroup)
+ copy(dAtA[i:], *m.APIGroup)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretBinding) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretBinding) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Quotas) > 0 {
+ for iNdEx := len(m.Quotas) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Quotas[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ {
+ size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretBindingList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretBindingList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Seed) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Seed) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Seed) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedBackup) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedBackup) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedBackup) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if m.Region != nil {
+ i -= len(*m.Region)
+ copy(dAtA[i:], *m.Region)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Region)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Provider)
+ copy(dAtA[i:], m.Provider)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provider)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedDNS) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedDNS) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedDNS) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Provider != nil {
+ {
+ size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.IngressDomain != nil {
+ i -= len(*m.IngressDomain)
+ copy(dAtA[i:], *m.IngressDomain)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressDomain)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedDNSProvider) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedDNSProvider) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedDNSProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Zones != nil {
+ {
+ size, err := m.Zones.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Domains != nil {
+ {
+ size, err := m.Domains.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ {
+ size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedNetworks) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedNetworks) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedNetworks) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.BlockCIDRs) > 0 {
+ for iNdEx := len(m.BlockCIDRs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.BlockCIDRs[iNdEx])
+ copy(dAtA[i:], m.BlockCIDRs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.BlockCIDRs[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.ShootDefaults != nil {
+ {
+ size, err := m.ShootDefaults.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ i -= len(m.Services)
+ copy(dAtA[i:], m.Services)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Services)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Pods)
+ copy(dAtA[i:], m.Pods)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pods)))
+ i--
+ dAtA[i] = 0x12
+ if m.Nodes != nil {
+ i -= len(*m.Nodes)
+ copy(dAtA[i:], *m.Nodes)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Nodes)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedProvider) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedProvider) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Region)
+ copy(dAtA[i:], m.Region)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region)))
+ i--
+ dAtA[i] = 0x1a
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSelector) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSelector) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ProviderTypes) > 0 {
+ for iNdEx := len(m.ProviderTypes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ProviderTypes[iNdEx])
+ copy(dAtA[i:], m.ProviderTypes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderTypes[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.LabelSelector != nil {
+ {
+ size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSettingExcessCapacityReservation) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSettingExcessCapacityReservation) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSettingExcessCapacityReservation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSettingLoadBalancerServices) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSettingLoadBalancerServices) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSettingLoadBalancerServices) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Annotations) > 0 {
+ keysForAnnotations := make([]string, 0, len(m.Annotations))
+ for k := range m.Annotations {
+ keysForAnnotations = append(keysForAnnotations, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Annotations[string(keysForAnnotations[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAnnotations[iNdEx])
+ copy(dAtA[i:], keysForAnnotations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSettingScheduling) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSettingScheduling) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSettingScheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Visible {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSettingShootDNS) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSettingShootDNS) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSettingShootDNS) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSettingVerticalPodAutoscaler) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSettingVerticalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSettingVerticalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSettings) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSettings) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.VerticalPodAutoscaler != nil {
+ {
+ size, err := m.VerticalPodAutoscaler.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.LoadBalancerServices != nil {
+ {
+ size, err := m.LoadBalancerServices.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ShootDNS != nil {
+ {
+ size, err := m.ShootDNS.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Scheduling != nil {
+ {
+ size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ExcessCapacityReservation != nil {
+ {
+ size, err := m.ExcessCapacityReservation.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Ingress != nil {
+ {
+ size, err := m.Ingress.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.Settings != nil {
+ {
+ size, err := m.Settings.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.Volume != nil {
+ {
+ size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.Taints) > 0 {
+ for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if m.SecretRef != nil {
+ {
+ size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ {
+ size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.Networks.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.DNS.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if m.Backup != nil {
+ {
+ size, err := m.Backup.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Allocatable) > 0 {
+ keysForAllocatable := make([]string, 0, len(m.Allocatable))
+ for k := range m.Allocatable {
+ keysForAllocatable = append(keysForAllocatable, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable)
+ for iNdEx := len(keysForAllocatable) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Allocatable[k8s_io_api_core_v1.ResourceName(keysForAllocatable[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAllocatable[iNdEx])
+ copy(dAtA[i:], keysForAllocatable[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatable[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if len(m.Capacity) > 0 {
+ keysForCapacity := make([]string, 0, len(m.Capacity))
+ for k := range m.Capacity {
+ keysForCapacity = append(keysForCapacity, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Capacity[k8s_io_api_core_v1.ResourceName(keysForCapacity[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForCapacity[iNdEx])
+ copy(dAtA[i:], keysForCapacity[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if m.ClusterIdentity != nil {
+ i -= len(*m.ClusterIdentity)
+ copy(dAtA[i:], *m.ClusterIdentity)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClusterIdentity)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x20
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.KubernetesVersion != nil {
+ i -= len(*m.KubernetesVersion)
+ copy(dAtA[i:], *m.KubernetesVersion)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.KubernetesVersion)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Gardener != nil {
+ {
+ size, err := m.Gardener.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedTaint) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedTaint) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedTaint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Value != nil {
+ i -= len(*m.Value)
+ copy(dAtA[i:], *m.Value)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedVolume) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedVolume) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Providers) > 0 {
+ for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.MinimumSize != nil {
+ {
+ size, err := m.MinimumSize.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SeedVolumeProvider) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SeedVolumeProvider) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeedVolumeProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Purpose)
+ copy(dAtA[i:], m.Purpose)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Purpose)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ServiceAccountConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ServiceAccountConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceAccountConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.SigningKeySecret != nil {
+ {
+ size, err := m.SigningKeySecret.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Issuer != nil {
+ i -= len(*m.Issuer)
+ copy(dAtA[i:], *m.Issuer)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Issuer)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Shoot) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Shoot) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Shoot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootMachineImage) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootMachineImage) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootMachineImage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Version != nil {
+ i -= len(*m.Version)
+ copy(dAtA[i:], *m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootNetworks) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootNetworks) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootNetworks) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Services != nil {
+ i -= len(*m.Services)
+ copy(dAtA[i:], *m.Services)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Services)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Pods != nil {
+ i -= len(*m.Pods)
+ copy(dAtA[i:], *m.Pods)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Pods)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Tolerations) > 0 {
+ for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x8a
+ }
+ }
+ if len(m.Resources) > 0 {
+ for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x82
+ }
+ }
+ if m.SeedSelector != nil {
+ {
+ size, err := m.SeedSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x7a
+ }
+ if m.SeedName != nil {
+ i -= len(*m.SeedName)
+ copy(dAtA[i:], *m.SeedName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SeedName)))
+ i--
+ dAtA[i] = 0x72
+ }
+ i -= len(m.SecretBindingName)
+ copy(dAtA[i:], m.SecretBindingName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretBindingName)))
+ i--
+ dAtA[i] = 0x6a
+ i -= len(m.Region)
+ copy(dAtA[i:], m.Region)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region)))
+ i--
+ dAtA[i] = 0x62
+ if m.Purpose != nil {
+ i -= len(*m.Purpose)
+ copy(dAtA[i:], *m.Purpose)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Purpose)))
+ i--
+ dAtA[i] = 0x5a
+ }
+ {
+ size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ if m.Monitoring != nil {
+ {
+ size, err := m.Monitoring.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.Maintenance != nil {
+ {
+ size, err := m.Maintenance.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ {
+ size, err := m.Networking.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ {
+ size, err := m.Kubernetes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ if m.Hibernation != nil {
+ {
+ size, err := m.Hibernation.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.Extensions) > 0 {
+ for iNdEx := len(m.Extensions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Extensions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.DNS != nil {
+ {
+ size, err := m.DNS.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ i -= len(m.CloudProfileName)
+ copy(dAtA[i:], m.CloudProfileName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CloudProfileName)))
+ i--
+ dAtA[i] = 0x12
+ if m.Addons != nil {
+ {
+ size, err := m.Addons.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ShootStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ShootStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ShootStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ClusterIdentity != nil {
+ i -= len(*m.ClusterIdentity)
+ copy(dAtA[i:], *m.ClusterIdentity)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClusterIdentity)))
+ i--
+ dAtA[i] = 0x62
+ }
+ i -= len(m.UID)
+ copy(dAtA[i:], m.UID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+ i--
+ dAtA[i] = 0x5a
+ i -= len(m.TechnicalID)
+ copy(dAtA[i:], m.TechnicalID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.TechnicalID)))
+ i--
+ dAtA[i] = 0x52
+ if m.SeedName != nil {
+ i -= len(*m.SeedName)
+ copy(dAtA[i:], *m.SeedName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SeedName)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.RetryCycleStartTime != nil {
+ {
+ size, err := m.RetryCycleStartTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x38
+ if len(m.LastErrors) > 0 {
+ for iNdEx := len(m.LastErrors) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.LastErrors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if m.LastOperation != nil {
+ {
+ size, err := m.LastOperation.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ i--
+ if m.IsHibernated {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ {
+ size, err := m.Gardener.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Constraints) > 0 {
+ for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Toleration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Toleration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Toleration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Value != nil {
+ i -= len(*m.Value)
+ copy(dAtA[i:], *m.Value)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *VerticalPodAutoscaler) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *VerticalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VerticalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RecommenderInterval != nil {
+ {
+ size, err := m.RecommenderInterval.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.UpdaterInterval != nil {
+ {
+ size, err := m.UpdaterInterval.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.RecommendationMarginFraction != nil {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.RecommendationMarginFraction))))
+ i--
+ dAtA[i] = 0x31
+ }
+ if m.EvictionTolerance != nil {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.EvictionTolerance))))
+ i--
+ dAtA[i] = 0x29
+ }
+ if m.EvictionRateLimit != nil {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.EvictionRateLimit))))
+ i--
+ dAtA[i] = 0x21
+ }
+ if m.EvictionRateBurst != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.EvictionRateBurst))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.EvictAfterOOMThreshold != nil {
+ {
+ size, err := m.EvictAfterOOMThreshold.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *Volume) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Volume) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Encrypted != nil {
+ i--
+ if *m.Encrypted {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ i -= len(m.VolumeSize)
+ copy(dAtA[i:], m.VolumeSize)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeSize)))
+ i--
+ dAtA[i] = 0x1a
+ if m.Type != nil {
+ i -= len(*m.Type)
+ copy(dAtA[i:], *m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Name != nil {
+ i -= len(*m.Name)
+ copy(dAtA[i:], *m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *VolumeType) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *VolumeType) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VolumeType) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Usable != nil {
+ i--
+ if *m.Usable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Class)
+ copy(dAtA[i:], m.Class)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Class)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *WatchCacheSizes) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchCacheSizes) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchCacheSizes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Resources) > 0 {
+ for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Default != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Default))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Worker) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Worker) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Worker) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MachineControllerManagerSettings != nil {
+ {
+ size, err := m.MachineControllerManagerSettings.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x9a
+ }
+ if m.SystemComponents != nil {
+ {
+ size, err := m.SystemComponents.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x92
+ }
+ if len(m.Zones) > 0 {
+ for iNdEx := len(m.Zones) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Zones[iNdEx])
+ copy(dAtA[i:], m.Zones[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Zones[iNdEx])))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x8a
+ }
+ }
+ if m.KubeletDataVolumeName != nil {
+ i -= len(*m.KubeletDataVolumeName)
+ copy(dAtA[i:], *m.KubeletDataVolumeName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.KubeletDataVolumeName)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x82
+ }
+ if len(m.DataVolumes) > 0 {
+ for iNdEx := len(m.DataVolumes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.DataVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x7a
+ }
+ }
+ if m.Volume != nil {
+ {
+ size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x72
+ }
+ if len(m.Taints) > 0 {
+ for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x6a
+ }
+ }
+ if m.ProviderConfig != nil {
+ {
+ size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x62
+ }
+ if m.MaxUnavailable != nil {
+ {
+ size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
+ if m.MaxSurge != nil {
+ {
+ size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Minimum))
+ i--
+ dAtA[i] = 0x48
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Maximum))
+ i--
+ dAtA[i] = 0x40
+ {
+ size, err := m.Machine.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x32
+ if len(m.Labels) > 0 {
+ keysForLabels := make([]string, 0, len(m.Labels))
+ for k := range m.Labels {
+ keysForLabels = append(keysForLabels, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Labels[string(keysForLabels[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForLabels[iNdEx])
+ copy(dAtA[i:], keysForLabels[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.Kubernetes != nil {
+ {
+ size, err := m.Kubernetes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.CRI != nil {
+ {
+ size, err := m.CRI.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.CABundle != nil {
+ i -= len(*m.CABundle)
+ copy(dAtA[i:], *m.CABundle)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CABundle)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Annotations) > 0 {
+ keysForAnnotations := make([]string, 0, len(m.Annotations))
+ for k := range m.Annotations {
+ keysForAnnotations = append(keysForAnnotations, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Annotations[string(keysForAnnotations[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAnnotations[iNdEx])
+ copy(dAtA[i:], keysForAnnotations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *WorkerKubernetes) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WorkerKubernetes) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WorkerKubernetes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Kubelet != nil {
+ {
+ size, err := m.Kubelet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *WorkerSystemComponents) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WorkerSystemComponents) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WorkerSystemComponents) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Allow {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Addon) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
+func (m *Addons) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.KubernetesDashboard != nil {
+ l = m.KubernetesDashboard.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NginxIngress != nil {
+ l = m.NginxIngress.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *AdmissionPlugin) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Config != nil {
+ l = m.Config.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Alerting) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.EmailReceivers) > 0 {
+ for _, s := range m.EmailReceivers {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *AuditConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AuditPolicy != nil {
+ l = m.AuditPolicy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *AuditPolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ConfigMapRef != nil {
+ l = m.ConfigMapRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *AvailabilityZone) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.UnavailableMachineTypes) > 0 {
+ for _, s := range m.UnavailableMachineTypes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.UnavailableVolumeTypes) > 0 {
+ for _, s := range m.UnavailableVolumeTypes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BackupBucket) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BackupBucketList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BackupBucketProvider) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Region)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BackupBucketSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Provider.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SeedName != nil {
+ l = len(*m.SeedName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *BackupBucketStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ProviderStatus != nil {
+ l = m.ProviderStatus.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.LastOperation != nil {
+ l = m.LastOperation.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.LastError != nil {
+ l = m.LastError.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ if m.GeneratedSecretRef != nil {
+ l = m.GeneratedSecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *BackupEntry) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BackupEntryList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BackupEntrySpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.BucketName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SeedName != nil {
+ l = len(*m.SeedName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *BackupEntryStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LastOperation != nil {
+ l = m.LastOperation.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.LastError != nil {
+ l = m.LastError.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ return n
+}
+
+func (m *CRI) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.ContainerRuntimes) > 0 {
+ for _, e := range m.ContainerRuntimes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CloudInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Region)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CloudProfile) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CloudProfileList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CloudProfileSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CABundle != nil {
+ l = len(*m.CABundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Kubernetes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.MachineImages) > 0 {
+ for _, e := range m.MachineImages {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.MachineTypes) > 0 {
+ for _, e := range m.MachineTypes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Regions) > 0 {
+ for _, e := range m.Regions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.SeedSelector != nil {
+ l = m.SeedSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.VolumeTypes) > 0 {
+ for _, e := range m.VolumeTypes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ClusterAutoscaler) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ScaleDownDelayAfterAdd != nil {
+ l = m.ScaleDownDelayAfterAdd.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ScaleDownDelayAfterDelete != nil {
+ l = m.ScaleDownDelayAfterDelete.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ScaleDownDelayAfterFailure != nil {
+ l = m.ScaleDownDelayAfterFailure.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ScaleDownUnneededTime != nil {
+ l = m.ScaleDownUnneededTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ScaleDownUtilizationThreshold != nil {
+ n += 9
+ }
+ if m.ScanInterval != nil {
+ l = m.ScanInterval.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ClusterInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Cloud.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Kubernetes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Condition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Codes) > 0 {
+ for _, s := range m.Codes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ContainerRuntime) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ControllerDeployment) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Policy != nil {
+ l = len(*m.Policy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SeedSelector != nil {
+ l = m.SeedSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ControllerInstallation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ControllerInstallationList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ControllerInstallationSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.RegistrationRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.SeedRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ControllerInstallationStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.ProviderStatus != nil {
+ l = m.ProviderStatus.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ControllerRegistration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ControllerRegistrationList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ControllerRegistrationSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Resources) > 0 {
+ for _, e := range m.Resources {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Deployment != nil {
+ l = m.Deployment.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ControllerResource) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.GloballyEnabled != nil {
+ n += 2
+ }
+ if m.ReconcileTimeout != nil {
+ l = m.ReconcileTimeout.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Primary != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *DNS) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Domain != nil {
+ l = len(*m.Domain)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Providers) > 0 {
+ for _, e := range m.Providers {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DNSIncludeExclude) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Include) > 0 {
+ for _, s := range m.Include {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Exclude) > 0 {
+ for _, s := range m.Exclude {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DNSProvider) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Domains != nil {
+ l = m.Domains.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Primary != nil {
+ n += 2
+ }
+ if m.SecretName != nil {
+ l = len(*m.SecretName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Type != nil {
+ l = len(*m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Zones != nil {
+ l = m.Zones.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *DataVolume) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Type != nil {
+ l = len(*m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.VolumeSize)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Encrypted != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *Endpoint) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.URL)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Purpose)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ExpirableVersion) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ExpirationDate != nil {
+ l = m.ExpirationDate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Classification != nil {
+ l = len(*m.Classification)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Extension) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Disabled != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *Gardener) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Hibernation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Enabled != nil {
+ n += 2
+ }
+ if len(m.Schedules) > 0 {
+ for _, e := range m.Schedules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *HibernationSchedule) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Start != nil {
+ l = len(*m.Start)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.End != nil {
+ l = len(*m.End)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Location != nil {
+ l = len(*m.Location)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *HorizontalPodAutoscalerConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CPUInitializationPeriod != nil {
+ l = m.CPUInitializationPeriod.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.DownscaleDelay != nil {
+ l = m.DownscaleDelay.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.DownscaleStabilization != nil {
+ l = m.DownscaleStabilization.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.InitialReadinessDelay != nil {
+ l = m.InitialReadinessDelay.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SyncPeriod != nil {
+ l = m.SyncPeriod.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Tolerance != nil {
+ n += 9
+ }
+ if m.UpscaleDelay != nil {
+ l = m.UpscaleDelay.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Ingress) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Domain)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Controller.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *IngressController) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeAPIServerConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.KubernetesConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.AdmissionPlugins) > 0 {
+ for _, e := range m.AdmissionPlugins {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.APIAudiences) > 0 {
+ for _, s := range m.APIAudiences {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.AuditConfig != nil {
+ l = m.AuditConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EnableBasicAuthentication != nil {
+ n += 2
+ }
+ if m.OIDCConfig != nil {
+ l = m.OIDCConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.RuntimeConfig) > 0 {
+ for k, v := range m.RuntimeConfig {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.ServiceAccountConfig != nil {
+ l = m.ServiceAccountConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.WatchCacheSizes != nil {
+ l = m.WatchCacheSizes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Requests != nil {
+ l = m.Requests.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeAPIServerRequests) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MaxNonMutatingInflight != nil {
+ n += 1 + sovGenerated(uint64(*m.MaxNonMutatingInflight))
+ }
+ if m.MaxMutatingInflight != nil {
+ n += 1 + sovGenerated(uint64(*m.MaxMutatingInflight))
+ }
+ return n
+}
+
+func (m *KubeControllerManagerConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.KubernetesConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.HorizontalPodAutoscalerConfig != nil {
+ l = m.HorizontalPodAutoscalerConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeCIDRMaskSize != nil {
+ n += 1 + sovGenerated(uint64(*m.NodeCIDRMaskSize))
+ }
+ if m.PodEvictionTimeout != nil {
+ l = m.PodEvictionTimeout.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeProxyConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.KubernetesConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Mode != nil {
+ l = len(*m.Mode)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeSchedulerConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.KubernetesConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.KubeMaxPDVols != nil {
+ l = len(*m.KubeMaxPDVols)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeletConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.KubernetesConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.CPUCFSQuota != nil {
+ n += 2
+ }
+ if m.CPUManagerPolicy != nil {
+ l = len(*m.CPUManagerPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EvictionHard != nil {
+ l = m.EvictionHard.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EvictionMaxPodGracePeriod != nil {
+ n += 1 + sovGenerated(uint64(*m.EvictionMaxPodGracePeriod))
+ }
+ if m.EvictionMinimumReclaim != nil {
+ l = m.EvictionMinimumReclaim.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EvictionPressureTransitionPeriod != nil {
+ l = m.EvictionPressureTransitionPeriod.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EvictionSoft != nil {
+ l = m.EvictionSoft.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EvictionSoftGracePeriod != nil {
+ l = m.EvictionSoftGracePeriod.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MaxPods != nil {
+ n += 1 + sovGenerated(uint64(*m.MaxPods))
+ }
+ if m.PodPIDsLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.PodPIDsLimit))
+ }
+ if m.ImagePullProgressDeadline != nil {
+ l = m.ImagePullProgressDeadline.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.FailSwapOn != nil {
+ n += 2
+ }
+ if m.KubeReserved != nil {
+ l = m.KubeReserved.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SystemReserved != nil {
+ l = m.SystemReserved.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeletConfigEviction) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MemoryAvailable != nil {
+ l = len(*m.MemoryAvailable)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageFSAvailable != nil {
+ l = len(*m.ImageFSAvailable)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageFSInodesFree != nil {
+ l = len(*m.ImageFSInodesFree)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeFSAvailable != nil {
+ l = len(*m.NodeFSAvailable)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeFSInodesFree != nil {
+ l = len(*m.NodeFSInodesFree)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeletConfigEvictionMinimumReclaim) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MemoryAvailable != nil {
+ l = m.MemoryAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageFSAvailable != nil {
+ l = m.ImageFSAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageFSInodesFree != nil {
+ l = m.ImageFSInodesFree.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeFSAvailable != nil {
+ l = m.NodeFSAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeFSInodesFree != nil {
+ l = m.NodeFSInodesFree.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeletConfigEvictionSoftGracePeriod) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MemoryAvailable != nil {
+ l = m.MemoryAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageFSAvailable != nil {
+ l = m.ImageFSAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageFSInodesFree != nil {
+ l = m.ImageFSInodesFree.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeFSAvailable != nil {
+ l = m.NodeFSAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeFSInodesFree != nil {
+ l = m.NodeFSInodesFree.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubeletConfigReserved) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CPU != nil {
+ l = m.CPU.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Memory != nil {
+ l = m.Memory.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EphemeralStorage != nil {
+ l = m.EphemeralStorage.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.PID != nil {
+ l = m.PID.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Kubernetes) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AllowPrivilegedContainers != nil {
+ n += 2
+ }
+ if m.ClusterAutoscaler != nil {
+ l = m.ClusterAutoscaler.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.KubeAPIServer != nil {
+ l = m.KubeAPIServer.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.KubeControllerManager != nil {
+ l = m.KubeControllerManager.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.KubeScheduler != nil {
+ l = m.KubeScheduler.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.KubeProxy != nil {
+ l = m.KubeProxy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Kubelet != nil {
+ l = m.Kubelet.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.VerticalPodAutoscaler != nil {
+ l = m.VerticalPodAutoscaler.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KubernetesConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.FeatureGates) > 0 {
+ for k, v := range m.FeatureGates {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *KubernetesDashboard) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AuthenticationMode != nil {
+ l = len(*m.AuthenticationMode)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Addon.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *KubernetesInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *KubernetesSettings) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Versions) > 0 {
+ for _, e := range m.Versions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LastError) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Description)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.TaskID != nil {
+ l = len(*m.TaskID)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Codes) > 0 {
+ for _, s := range m.Codes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.LastUpdateTime != nil {
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *LastOperation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Description)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Progress))
+ l = len(m.State)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Machine) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Image != nil {
+ l = m.Image.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *MachineControllerManagerSettings) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MachineDrainTimeout != nil {
+ l = m.MachineDrainTimeout.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MachineHealthTimeout != nil {
+ l = m.MachineHealthTimeout.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MachineCreationTimeout != nil {
+ l = m.MachineCreationTimeout.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MaxEvictRetries != nil {
+ n += 1 + sovGenerated(uint64(*m.MaxEvictRetries))
+ }
+ if len(m.NodeConditions) > 0 {
+ for _, s := range m.NodeConditions {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MachineImage) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Versions) > 0 {
+ for _, e := range m.Versions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MachineImageVersion) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ExpirableVersion.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.CRI) > 0 {
+ for _, e := range m.CRI {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MachineType) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.CPU.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.GPU.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Memory.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Storage != nil {
+ l = m.Storage.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Usable != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *MachineTypeStorage) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Class)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.StorageSize.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Maintenance) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AutoUpdate != nil {
+ l = m.AutoUpdate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.TimeWindow != nil {
+ l = m.TimeWindow.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ConfineSpecUpdateRollout != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *MaintenanceAutoUpdate) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ n += 2
+ return n
+}
+
+func (m *MaintenanceTimeWindow) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Begin)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.End)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Monitoring) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Alerting != nil {
+ l = m.Alerting.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *NamedResourceReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.ResourceRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Networking) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Pods != nil {
+ l = len(*m.Pods)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Nodes != nil {
+ l = len(*m.Nodes)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Services != nil {
+ l = len(*m.Services)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *NginxIngress) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Addon.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.LoadBalancerSourceRanges) > 0 {
+ for _, s := range m.LoadBalancerSourceRanges {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Config) > 0 {
+ for k, v := range m.Config {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.ExternalTrafficPolicy != nil {
+ l = len(*m.ExternalTrafficPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *OIDCConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CABundle != nil {
+ l = len(*m.CABundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ClientAuthentication != nil {
+ l = m.ClientAuthentication.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ClientID != nil {
+ l = len(*m.ClientID)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GroupsClaim != nil {
+ l = len(*m.GroupsClaim)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GroupsPrefix != nil {
+ l = len(*m.GroupsPrefix)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.IssuerURL != nil {
+ l = len(*m.IssuerURL)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.RequiredClaims) > 0 {
+ for k, v := range m.RequiredClaims {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.SigningAlgs) > 0 {
+ for _, s := range m.SigningAlgs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.UsernameClaim != nil {
+ l = len(*m.UsernameClaim)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.UsernamePrefix != nil {
+ l = len(*m.UsernamePrefix)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *OpenIDConnectClientAuthentication) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ExtraConfig) > 0 {
+ for k, v := range m.ExtraConfig {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.Secret != nil {
+ l = len(*m.Secret)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Plant) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PlantList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PlantSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Endpoints) > 0 {
+ for _, e := range m.Endpoints {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PlantStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.ObservedGeneration != nil {
+ n += 1 + sovGenerated(uint64(*m.ObservedGeneration))
+ }
+ if m.ClusterInfo != nil {
+ l = m.ClusterInfo.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Project) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ProjectList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ProjectMember) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Subject.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Role)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ProjectSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CreatedBy != nil {
+ l = m.CreatedBy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Description != nil {
+ l = len(*m.Description)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Owner != nil {
+ l = m.Owner.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Purpose != nil {
+ l = len(*m.Purpose)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Namespace != nil {
+ l = len(*m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Tolerations != nil {
+ l = m.Tolerations.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ProjectStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ l = len(m.Phase)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.StaleSinceTimestamp != nil {
+ l = m.StaleSinceTimestamp.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.StaleAutoDeleteTimestamp != nil {
+ l = m.StaleAutoDeleteTimestamp.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ProjectTolerations) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Defaults) > 0 {
+ for _, e := range m.Defaults {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Whitelist) > 0 {
+ for _, e := range m.Whitelist {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Provider) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ControlPlaneConfig != nil {
+ l = m.ControlPlaneConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.InfrastructureConfig != nil {
+ l = m.InfrastructureConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Workers) > 0 {
+ for _, e := range m.Workers {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Quota) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *QuotaList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *QuotaSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ClusterLifetimeDays != nil {
+ n += 1 + sovGenerated(uint64(*m.ClusterLifetimeDays))
+ }
+ if len(m.Metrics) > 0 {
+ for k, v := range m.Metrics {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = m.Scope.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Region) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Zones) > 0 {
+ for _, e := range m.Zones {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *ResourceWatchCacheSize) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.APIGroup != nil {
+ l = len(*m.APIGroup)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Resource)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.CacheSize))
+ return n
+}
+
+func (m *SecretBinding) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Quotas) > 0 {
+ for _, e := range m.Quotas {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SecretBindingList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Seed) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SeedBackup) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Provider)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Region != nil {
+ l = len(*m.Region)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SeedDNS) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.IngressDomain != nil {
+ l = len(*m.IngressDomain)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Provider != nil {
+ l = m.Provider.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SeedDNSProvider) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Domains != nil {
+ l = m.Domains.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Zones != nil {
+ l = m.Zones.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SeedList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SeedNetworks) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Nodes != nil {
+ l = len(*m.Nodes)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Pods)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Services)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ShootDefaults != nil {
+ l = m.ShootDefaults.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.BlockCIDRs) > 0 {
+ for _, s := range m.BlockCIDRs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SeedProvider) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Region)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SeedSelector) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LabelSelector != nil {
+ l = m.LabelSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.ProviderTypes) > 0 {
+ for _, s := range m.ProviderTypes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SeedSettingExcessCapacityReservation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
+func (m *SeedSettingLoadBalancerServices) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Annotations) > 0 {
+ for k, v := range m.Annotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *SeedSettingScheduling) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
+func (m *SeedSettingShootDNS) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
+func (m *SeedSettingVerticalPodAutoscaler) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
+func (m *SeedSettings) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ExcessCapacityReservation != nil {
+ l = m.ExcessCapacityReservation.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Scheduling != nil {
+ l = m.Scheduling.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ShootDNS != nil {
+ l = m.ShootDNS.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.LoadBalancerServices != nil {
+ l = m.LoadBalancerServices.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.VerticalPodAutoscaler != nil {
+ l = m.VerticalPodAutoscaler.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SeedSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Backup != nil {
+ l = m.Backup.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.DNS.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Networks.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Provider.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SecretRef != nil {
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Taints) > 0 {
+ for _, e := range m.Taints {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Volume != nil {
+ l = m.Volume.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Settings != nil {
+ l = m.Settings.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Ingress != nil {
+ l = m.Ingress.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SeedStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Gardener != nil {
+ l = m.Gardener.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.KubernetesVersion != nil {
+ l = len(*m.KubernetesVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ if m.ClusterIdentity != nil {
+ l = len(*m.ClusterIdentity)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Capacity) > 0 {
+ for k, v := range m.Capacity {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Allocatable) > 0 {
+ for k, v := range m.Allocatable {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *SeedTaint) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Value != nil {
+ l = len(*m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SeedVolume) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MinimumSize != nil {
+ l = m.MinimumSize.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Providers) > 0 {
+ for _, e := range m.Providers {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SeedVolumeProvider) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Purpose)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ServiceAccountConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Issuer != nil {
+ l = len(*m.Issuer)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SigningKeySecret != nil {
+ l = m.SigningKeySecret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Shoot) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ShootList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ShootMachineImage) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Version != nil {
+ l = len(*m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ShootNetworks) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Pods != nil {
+ l = len(*m.Pods)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Services != nil {
+ l = len(*m.Services)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ShootSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Addons != nil {
+ l = m.Addons.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.CloudProfileName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.DNS != nil {
+ l = m.DNS.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Extensions) > 0 {
+ for _, e := range m.Extensions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Hibernation != nil {
+ l = m.Hibernation.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Kubernetes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Networking.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Maintenance != nil {
+ l = m.Maintenance.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Monitoring != nil {
+ l = m.Monitoring.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Provider.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Purpose != nil {
+ l = len(*m.Purpose)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Region)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.SecretBindingName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SeedName != nil {
+ l = len(*m.SeedName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SeedSelector != nil {
+ l = m.SeedSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Resources) > 0 {
+ for _, e := range m.Resources {
+ l = e.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Tolerations) > 0 {
+ for _, e := range m.Tolerations {
+ l = e.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ShootStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Constraints) > 0 {
+ for _, e := range m.Constraints {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.Gardener.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ if m.LastOperation != nil {
+ l = m.LastOperation.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.LastErrors) > 0 {
+ for _, e := range m.LastErrors {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ if m.RetryCycleStartTime != nil {
+ l = m.RetryCycleStartTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SeedName != nil {
+ l = len(*m.SeedName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.TechnicalID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ClusterIdentity != nil {
+ l = len(*m.ClusterIdentity)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Toleration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Value != nil {
+ l = len(*m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *VerticalPodAutoscaler) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ if m.EvictAfterOOMThreshold != nil {
+ l = m.EvictAfterOOMThreshold.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EvictionRateBurst != nil {
+ n += 1 + sovGenerated(uint64(*m.EvictionRateBurst))
+ }
+ if m.EvictionRateLimit != nil {
+ n += 9
+ }
+ if m.EvictionTolerance != nil {
+ n += 9
+ }
+ if m.RecommendationMarginFraction != nil {
+ n += 9
+ }
+ if m.UpdaterInterval != nil {
+ l = m.UpdaterInterval.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.RecommenderInterval != nil {
+ l = m.RecommenderInterval.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Volume) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Name != nil {
+ l = len(*m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Type != nil {
+ l = len(*m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.VolumeSize)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Encrypted != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *VolumeType) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Class)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Usable != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *WatchCacheSizes) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Default != nil {
+ n += 1 + sovGenerated(uint64(*m.Default))
+ }
+ if len(m.Resources) > 0 {
+ for _, e := range m.Resources {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Worker) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Annotations) > 0 {
+ for k, v := range m.Annotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.CABundle != nil {
+ l = len(*m.CABundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.CRI != nil {
+ l = m.CRI.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Kubernetes != nil {
+ l = m.Kubernetes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Machine.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Maximum))
+ n += 1 + sovGenerated(uint64(m.Minimum))
+ if m.MaxSurge != nil {
+ l = m.MaxSurge.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MaxUnavailable != nil {
+ l = m.MaxUnavailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ProviderConfig != nil {
+ l = m.ProviderConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Taints) > 0 {
+ for _, e := range m.Taints {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Volume != nil {
+ l = m.Volume.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.DataVolumes) > 0 {
+ for _, e := range m.DataVolumes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.KubeletDataVolumeName != nil {
+ l = len(*m.KubeletDataVolumeName)
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Zones) > 0 {
+ for _, s := range m.Zones {
+ l = len(s)
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.SystemComponents != nil {
+ l = m.SystemComponents.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ if m.MachineControllerManagerSettings != nil {
+ l = m.MachineControllerManagerSettings.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *WorkerKubernetes) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Kubelet != nil {
+ l = m.Kubelet.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *WorkerSystemComponents) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
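+// sovGenerated returns the number of bytes needed to encode x as a protobuf
+// varint (one byte per 7 bits of payload); sozGenerated computes the same
+// size after zigzag encoding, as used for signed (sint) fields.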
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
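+
+// The String methods below render each type as a human-readable Go-style
+// literal for debugging and log output; where a type contains maps, the keys
+// are sorted first so the output is deterministic.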
+func (this *Addon) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Addon{`,
+ `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Addons) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Addons{`,
+ `KubernetesDashboard:` + strings.Replace(this.KubernetesDashboard.String(), "KubernetesDashboard", "KubernetesDashboard", 1) + `,`,
+ `NginxIngress:` + strings.Replace(this.NginxIngress.String(), "NginxIngress", "NginxIngress", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AdmissionPlugin) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AdmissionPlugin{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Alerting) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Alerting{`,
+ `EmailReceivers:` + fmt.Sprintf("%v", this.EmailReceivers) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AuditConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AuditConfig{`,
+ `AuditPolicy:` + strings.Replace(this.AuditPolicy.String(), "AuditPolicy", "AuditPolicy", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AuditPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AuditPolicy{`,
+ `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ObjectReference", "v1.ObjectReference", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AvailabilityZone) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AvailabilityZone{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `UnavailableMachineTypes:` + fmt.Sprintf("%v", this.UnavailableMachineTypes) + `,`,
+ `UnavailableVolumeTypes:` + fmt.Sprintf("%v", this.UnavailableVolumeTypes) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupBucket) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupBucket{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BackupBucketSpec", "BackupBucketSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BackupBucketStatus", "BackupBucketStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupBucketList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]BackupBucket{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BackupBucket", "BackupBucket", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&BackupBucketList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupBucketProvider) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupBucketProvider{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Region:` + fmt.Sprintf("%v", this.Region) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupBucketSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupBucketSpec{`,
+ `Provider:` + strings.Replace(strings.Replace(this.Provider.String(), "BackupBucketProvider", "BackupBucketProvider", 1), `&`, ``, 1) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`,
+ `SeedName:` + valueToStringGenerated(this.SeedName) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupBucketStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupBucketStatus{`,
+ `ProviderStatus:` + strings.Replace(fmt.Sprintf("%v", this.ProviderStatus), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `LastOperation:` + strings.Replace(this.LastOperation.String(), "LastOperation", "LastOperation", 1) + `,`,
+ `LastError:` + strings.Replace(this.LastError.String(), "LastError", "LastError", 1) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `GeneratedSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.GeneratedSecretRef), "SecretReference", "v1.SecretReference", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupEntry) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupEntry{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BackupEntrySpec", "BackupEntrySpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BackupEntryStatus", "BackupEntryStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupEntryList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]BackupEntry{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BackupEntry", "BackupEntry", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&BackupEntryList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupEntrySpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupEntrySpec{`,
+ `BucketName:` + fmt.Sprintf("%v", this.BucketName) + `,`,
+ `SeedName:` + valueToStringGenerated(this.SeedName) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BackupEntryStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BackupEntryStatus{`,
+ `LastOperation:` + strings.Replace(this.LastOperation.String(), "LastOperation", "LastOperation", 1) + `,`,
+ `LastError:` + strings.Replace(this.LastError.String(), "LastError", "LastError", 1) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CRI) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForContainerRuntimes := "[]ContainerRuntime{"
+ for _, f := range this.ContainerRuntimes {
+ repeatedStringForContainerRuntimes += strings.Replace(strings.Replace(f.String(), "ContainerRuntime", "ContainerRuntime", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForContainerRuntimes += "}"
+ s := strings.Join([]string{`&CRI{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `ContainerRuntimes:` + repeatedStringForContainerRuntimes + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CloudInfo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CloudInfo{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Region:` + fmt.Sprintf("%v", this.Region) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CloudProfile) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CloudProfile{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CloudProfileSpec", "CloudProfileSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CloudProfileList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]CloudProfile{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CloudProfile", "CloudProfile", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&CloudProfileList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CloudProfileSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForMachineImages := "[]MachineImage{"
+ for _, f := range this.MachineImages {
+ repeatedStringForMachineImages += strings.Replace(strings.Replace(f.String(), "MachineImage", "MachineImage", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMachineImages += "}"
+ repeatedStringForMachineTypes := "[]MachineType{"
+ for _, f := range this.MachineTypes {
+ repeatedStringForMachineTypes += strings.Replace(strings.Replace(f.String(), "MachineType", "MachineType", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMachineTypes += "}"
+ repeatedStringForRegions := "[]Region{"
+ for _, f := range this.Regions {
+ repeatedStringForRegions += strings.Replace(strings.Replace(f.String(), "Region", "Region", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForRegions += "}"
+ repeatedStringForVolumeTypes := "[]VolumeType{"
+ for _, f := range this.VolumeTypes {
+ repeatedStringForVolumeTypes += strings.Replace(strings.Replace(f.String(), "VolumeType", "VolumeType", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVolumeTypes += "}"
+ s := strings.Join([]string{`&CloudProfileSpec{`,
+ `CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
+ `Kubernetes:` + strings.Replace(strings.Replace(this.Kubernetes.String(), "KubernetesSettings", "KubernetesSettings", 1), `&`, ``, 1) + `,`,
+ `MachineImages:` + repeatedStringForMachineImages + `,`,
+ `MachineTypes:` + repeatedStringForMachineTypes + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Regions:` + repeatedStringForRegions + `,`,
+ `SeedSelector:` + strings.Replace(this.SeedSelector.String(), "SeedSelector", "SeedSelector", 1) + `,`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `VolumeTypes:` + repeatedStringForVolumeTypes + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterAutoscaler) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterAutoscaler{`,
+ `ScaleDownDelayAfterAdd:` + strings.Replace(fmt.Sprintf("%v", this.ScaleDownDelayAfterAdd), "Duration", "v11.Duration", 1) + `,`,
+ `ScaleDownDelayAfterDelete:` + strings.Replace(fmt.Sprintf("%v", this.ScaleDownDelayAfterDelete), "Duration", "v11.Duration", 1) + `,`,
+ `ScaleDownDelayAfterFailure:` + strings.Replace(fmt.Sprintf("%v", this.ScaleDownDelayAfterFailure), "Duration", "v11.Duration", 1) + `,`,
+ `ScaleDownUnneededTime:` + strings.Replace(fmt.Sprintf("%v", this.ScaleDownUnneededTime), "Duration", "v11.Duration", 1) + `,`,
+ `ScaleDownUtilizationThreshold:` + valueToStringGenerated(this.ScaleDownUtilizationThreshold) + `,`,
+ `ScanInterval:` + strings.Replace(fmt.Sprintf("%v", this.ScanInterval), "Duration", "v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterInfo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterInfo{`,
+ `Cloud:` + strings.Replace(strings.Replace(this.Cloud.String(), "CloudInfo", "CloudInfo", 1), `&`, ``, 1) + `,`,
+ `Kubernetes:` + strings.Replace(strings.Replace(this.Kubernetes.String(), "KubernetesInfo", "KubernetesInfo", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Condition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Condition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`,
+ `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `Codes:` + fmt.Sprintf("%v", this.Codes) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ContainerRuntime) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ContainerRuntime{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerDeployment) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ControllerDeployment{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Policy:` + valueToStringGenerated(this.Policy) + `,`,
+ `SeedSelector:` + strings.Replace(fmt.Sprintf("%v", this.SeedSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerInstallation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ControllerInstallation{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ControllerInstallationSpec", "ControllerInstallationSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ControllerInstallationStatus", "ControllerInstallationStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerInstallationList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ControllerInstallation{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerInstallation", "ControllerInstallation", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ControllerInstallationList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerInstallationSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ControllerInstallationSpec{`,
+ `RegistrationRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RegistrationRef), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `SeedRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SeedRef), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerInstallationStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&ControllerInstallationStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `ProviderStatus:` + strings.Replace(fmt.Sprintf("%v", this.ProviderStatus), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerRegistration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ControllerRegistration{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ControllerRegistrationSpec", "ControllerRegistrationSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerRegistrationList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ControllerRegistration{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRegistration", "ControllerRegistration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ControllerRegistrationList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerRegistrationSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForResources := "[]ControllerResource{"
+ for _, f := range this.Resources {
+ repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ControllerResource", "ControllerResource", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResources += "}"
+ s := strings.Join([]string{`&ControllerRegistrationSpec{`,
+ `Resources:` + repeatedStringForResources + `,`,
+ `Deployment:` + strings.Replace(this.Deployment.String(), "ControllerDeployment", "ControllerDeployment", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ControllerResource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ControllerResource{`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `GloballyEnabled:` + valueToStringGenerated(this.GloballyEnabled) + `,`,
+ `ReconcileTimeout:` + strings.Replace(fmt.Sprintf("%v", this.ReconcileTimeout), "Duration", "v11.Duration", 1) + `,`,
+ `Primary:` + valueToStringGenerated(this.Primary) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DNS) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForProviders := "[]DNSProvider{"
+ for _, f := range this.Providers {
+ repeatedStringForProviders += strings.Replace(strings.Replace(f.String(), "DNSProvider", "DNSProvider", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForProviders += "}"
+ s := strings.Join([]string{`&DNS{`,
+ `Domain:` + valueToStringGenerated(this.Domain) + `,`,
+ `Providers:` + repeatedStringForProviders + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DNSIncludeExclude) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DNSIncludeExclude{`,
+ `Include:` + fmt.Sprintf("%v", this.Include) + `,`,
+ `Exclude:` + fmt.Sprintf("%v", this.Exclude) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DNSProvider) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DNSProvider{`,
+ `Domains:` + strings.Replace(this.Domains.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`,
+ `Primary:` + valueToStringGenerated(this.Primary) + `,`,
+ `SecretName:` + valueToStringGenerated(this.SecretName) + `,`,
+ `Type:` + valueToStringGenerated(this.Type) + `,`,
+ `Zones:` + strings.Replace(this.Zones.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DataVolume) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DataVolume{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Type:` + valueToStringGenerated(this.Type) + `,`,
+ `VolumeSize:` + fmt.Sprintf("%v", this.VolumeSize) + `,`,
+ `Encrypted:` + valueToStringGenerated(this.Encrypted) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Endpoint) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Endpoint{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `URL:` + fmt.Sprintf("%v", this.URL) + `,`,
+ `Purpose:` + fmt.Sprintf("%v", this.Purpose) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ExpirableVersion) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ExpirableVersion{`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `ExpirationDate:` + strings.Replace(fmt.Sprintf("%v", this.ExpirationDate), "Time", "v11.Time", 1) + `,`,
+ `Classification:` + valueToStringGenerated(this.Classification) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Extension) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Extension{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Disabled:` + valueToStringGenerated(this.Disabled) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Gardener) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Gardener{`,
+ `ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Hibernation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSchedules := "[]HibernationSchedule{"
+ for _, f := range this.Schedules {
+ repeatedStringForSchedules += strings.Replace(strings.Replace(f.String(), "HibernationSchedule", "HibernationSchedule", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSchedules += "}"
+ s := strings.Join([]string{`&Hibernation{`,
+ `Enabled:` + valueToStringGenerated(this.Enabled) + `,`,
+ `Schedules:` + repeatedStringForSchedules + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *HibernationSchedule) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&HibernationSchedule{`,
+ `Start:` + valueToStringGenerated(this.Start) + `,`,
+ `End:` + valueToStringGenerated(this.End) + `,`,
+ `Location:` + valueToStringGenerated(this.Location) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *HorizontalPodAutoscalerConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&HorizontalPodAutoscalerConfig{`,
+ `CPUInitializationPeriod:` + strings.Replace(fmt.Sprintf("%v", this.CPUInitializationPeriod), "Duration", "v11.Duration", 1) + `,`,
+ `DownscaleDelay:` + strings.Replace(fmt.Sprintf("%v", this.DownscaleDelay), "Duration", "v11.Duration", 1) + `,`,
+ `DownscaleStabilization:` + strings.Replace(fmt.Sprintf("%v", this.DownscaleStabilization), "Duration", "v11.Duration", 1) + `,`,
+ `InitialReadinessDelay:` + strings.Replace(fmt.Sprintf("%v", this.InitialReadinessDelay), "Duration", "v11.Duration", 1) + `,`,
+ `SyncPeriod:` + strings.Replace(fmt.Sprintf("%v", this.SyncPeriod), "Duration", "v11.Duration", 1) + `,`,
+ `Tolerance:` + valueToStringGenerated(this.Tolerance) + `,`,
+ `UpscaleDelay:` + strings.Replace(fmt.Sprintf("%v", this.UpscaleDelay), "Duration", "v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Ingress) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Ingress{`,
+ `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`,
+ `Controller:` + strings.Replace(strings.Replace(this.Controller.String(), "IngressController", "IngressController", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressController) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IngressController{`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeAPIServerConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForAdmissionPlugins := "[]AdmissionPlugin{"
+ for _, f := range this.AdmissionPlugins {
+ repeatedStringForAdmissionPlugins += strings.Replace(strings.Replace(f.String(), "AdmissionPlugin", "AdmissionPlugin", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForAdmissionPlugins += "}"
+ keysForRuntimeConfig := make([]string, 0, len(this.RuntimeConfig))
+ for k := range this.RuntimeConfig {
+ keysForRuntimeConfig = append(keysForRuntimeConfig, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForRuntimeConfig)
+ mapStringForRuntimeConfig := "map[string]bool{"
+ for _, k := range keysForRuntimeConfig {
+ mapStringForRuntimeConfig += fmt.Sprintf("%v: %v,", k, this.RuntimeConfig[k])
+ }
+ mapStringForRuntimeConfig += "}"
+ s := strings.Join([]string{`&KubeAPIServerConfig{`,
+ `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`,
+ `AdmissionPlugins:` + repeatedStringForAdmissionPlugins + `,`,
+ `APIAudiences:` + fmt.Sprintf("%v", this.APIAudiences) + `,`,
+ `AuditConfig:` + strings.Replace(this.AuditConfig.String(), "AuditConfig", "AuditConfig", 1) + `,`,
+ `EnableBasicAuthentication:` + valueToStringGenerated(this.EnableBasicAuthentication) + `,`,
+ `OIDCConfig:` + strings.Replace(this.OIDCConfig.String(), "OIDCConfig", "OIDCConfig", 1) + `,`,
+ `RuntimeConfig:` + mapStringForRuntimeConfig + `,`,
+ `ServiceAccountConfig:` + strings.Replace(this.ServiceAccountConfig.String(), "ServiceAccountConfig", "ServiceAccountConfig", 1) + `,`,
+ `WatchCacheSizes:` + strings.Replace(this.WatchCacheSizes.String(), "WatchCacheSizes", "WatchCacheSizes", 1) + `,`,
+ `Requests:` + strings.Replace(this.Requests.String(), "KubeAPIServerRequests", "KubeAPIServerRequests", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeAPIServerRequests) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeAPIServerRequests{`,
+ `MaxNonMutatingInflight:` + valueToStringGenerated(this.MaxNonMutatingInflight) + `,`,
+ `MaxMutatingInflight:` + valueToStringGenerated(this.MaxMutatingInflight) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeControllerManagerConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeControllerManagerConfig{`,
+ `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`,
+ `HorizontalPodAutoscalerConfig:` + strings.Replace(this.HorizontalPodAutoscalerConfig.String(), "HorizontalPodAutoscalerConfig", "HorizontalPodAutoscalerConfig", 1) + `,`,
+ `NodeCIDRMaskSize:` + valueToStringGenerated(this.NodeCIDRMaskSize) + `,`,
+ `PodEvictionTimeout:` + strings.Replace(fmt.Sprintf("%v", this.PodEvictionTimeout), "Duration", "v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeProxyConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeProxyConfig{`,
+ `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`,
+ `Mode:` + valueToStringGenerated(this.Mode) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeSchedulerConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeSchedulerConfig{`,
+ `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`,
+ `KubeMaxPDVols:` + valueToStringGenerated(this.KubeMaxPDVols) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeletConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeletConfig{`,
+ `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`,
+ `CPUCFSQuota:` + valueToStringGenerated(this.CPUCFSQuota) + `,`,
+ `CPUManagerPolicy:` + valueToStringGenerated(this.CPUManagerPolicy) + `,`,
+ `EvictionHard:` + strings.Replace(this.EvictionHard.String(), "KubeletConfigEviction", "KubeletConfigEviction", 1) + `,`,
+ `EvictionMaxPodGracePeriod:` + valueToStringGenerated(this.EvictionMaxPodGracePeriod) + `,`,
+ `EvictionMinimumReclaim:` + strings.Replace(this.EvictionMinimumReclaim.String(), "KubeletConfigEvictionMinimumReclaim", "KubeletConfigEvictionMinimumReclaim", 1) + `,`,
+ `EvictionPressureTransitionPeriod:` + strings.Replace(fmt.Sprintf("%v", this.EvictionPressureTransitionPeriod), "Duration", "v11.Duration", 1) + `,`,
+ `EvictionSoft:` + strings.Replace(this.EvictionSoft.String(), "KubeletConfigEviction", "KubeletConfigEviction", 1) + `,`,
+ `EvictionSoftGracePeriod:` + strings.Replace(this.EvictionSoftGracePeriod.String(), "KubeletConfigEvictionSoftGracePeriod", "KubeletConfigEvictionSoftGracePeriod", 1) + `,`,
+ `MaxPods:` + valueToStringGenerated(this.MaxPods) + `,`,
+ `PodPIDsLimit:` + valueToStringGenerated(this.PodPIDsLimit) + `,`,
+ `ImagePullProgressDeadline:` + strings.Replace(fmt.Sprintf("%v", this.ImagePullProgressDeadline), "Duration", "v11.Duration", 1) + `,`,
+ `FailSwapOn:` + valueToStringGenerated(this.FailSwapOn) + `,`,
+ `KubeReserved:` + strings.Replace(this.KubeReserved.String(), "KubeletConfigReserved", "KubeletConfigReserved", 1) + `,`,
+ `SystemReserved:` + strings.Replace(this.SystemReserved.String(), "KubeletConfigReserved", "KubeletConfigReserved", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeletConfigEviction) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeletConfigEviction{`,
+ `MemoryAvailable:` + valueToStringGenerated(this.MemoryAvailable) + `,`,
+ `ImageFSAvailable:` + valueToStringGenerated(this.ImageFSAvailable) + `,`,
+ `ImageFSInodesFree:` + valueToStringGenerated(this.ImageFSInodesFree) + `,`,
+ `NodeFSAvailable:` + valueToStringGenerated(this.NodeFSAvailable) + `,`,
+ `NodeFSInodesFree:` + valueToStringGenerated(this.NodeFSInodesFree) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeletConfigEvictionMinimumReclaim) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeletConfigEvictionMinimumReclaim{`,
+ `MemoryAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MemoryAvailable), "Quantity", "resource.Quantity", 1) + `,`,
+ `ImageFSAvailable:` + strings.Replace(fmt.Sprintf("%v", this.ImageFSAvailable), "Quantity", "resource.Quantity", 1) + `,`,
+ `ImageFSInodesFree:` + strings.Replace(fmt.Sprintf("%v", this.ImageFSInodesFree), "Quantity", "resource.Quantity", 1) + `,`,
+ `NodeFSAvailable:` + strings.Replace(fmt.Sprintf("%v", this.NodeFSAvailable), "Quantity", "resource.Quantity", 1) + `,`,
+ `NodeFSInodesFree:` + strings.Replace(fmt.Sprintf("%v", this.NodeFSInodesFree), "Quantity", "resource.Quantity", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeletConfigEvictionSoftGracePeriod) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeletConfigEvictionSoftGracePeriod{`,
+ `MemoryAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MemoryAvailable), "Duration", "v11.Duration", 1) + `,`,
+ `ImageFSAvailable:` + strings.Replace(fmt.Sprintf("%v", this.ImageFSAvailable), "Duration", "v11.Duration", 1) + `,`,
+ `ImageFSInodesFree:` + strings.Replace(fmt.Sprintf("%v", this.ImageFSInodesFree), "Duration", "v11.Duration", 1) + `,`,
+ `NodeFSAvailable:` + strings.Replace(fmt.Sprintf("%v", this.NodeFSAvailable), "Duration", "v11.Duration", 1) + `,`,
+ `NodeFSInodesFree:` + strings.Replace(fmt.Sprintf("%v", this.NodeFSInodesFree), "Duration", "v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubeletConfigReserved) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubeletConfigReserved{`,
+ `CPU:` + strings.Replace(fmt.Sprintf("%v", this.CPU), "Quantity", "resource.Quantity", 1) + `,`,
+ `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "Quantity", "resource.Quantity", 1) + `,`,
+ `EphemeralStorage:` + strings.Replace(fmt.Sprintf("%v", this.EphemeralStorage), "Quantity", "resource.Quantity", 1) + `,`,
+ `PID:` + strings.Replace(fmt.Sprintf("%v", this.PID), "Quantity", "resource.Quantity", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Kubernetes) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Kubernetes{`,
+ `AllowPrivilegedContainers:` + valueToStringGenerated(this.AllowPrivilegedContainers) + `,`,
+ `ClusterAutoscaler:` + strings.Replace(this.ClusterAutoscaler.String(), "ClusterAutoscaler", "ClusterAutoscaler", 1) + `,`,
+ `KubeAPIServer:` + strings.Replace(this.KubeAPIServer.String(), "KubeAPIServerConfig", "KubeAPIServerConfig", 1) + `,`,
+ `KubeControllerManager:` + strings.Replace(this.KubeControllerManager.String(), "KubeControllerManagerConfig", "KubeControllerManagerConfig", 1) + `,`,
+ `KubeScheduler:` + strings.Replace(this.KubeScheduler.String(), "KubeSchedulerConfig", "KubeSchedulerConfig", 1) + `,`,
+ `KubeProxy:` + strings.Replace(this.KubeProxy.String(), "KubeProxyConfig", "KubeProxyConfig", 1) + `,`,
+ `Kubelet:` + strings.Replace(this.Kubelet.String(), "KubeletConfig", "KubeletConfig", 1) + `,`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `VerticalPodAutoscaler:` + strings.Replace(this.VerticalPodAutoscaler.String(), "VerticalPodAutoscaler", "VerticalPodAutoscaler", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubernetesConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForFeatureGates := make([]string, 0, len(this.FeatureGates))
+ for k := range this.FeatureGates {
+ keysForFeatureGates = append(keysForFeatureGates, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForFeatureGates)
+ mapStringForFeatureGates := "map[string]bool{"
+ for _, k := range keysForFeatureGates {
+ mapStringForFeatureGates += fmt.Sprintf("%v: %v,", k, this.FeatureGates[k])
+ }
+ mapStringForFeatureGates += "}"
+ s := strings.Join([]string{`&KubernetesConfig{`,
+ `FeatureGates:` + mapStringForFeatureGates + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubernetesDashboard) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubernetesDashboard{`,
+ `AuthenticationMode:` + valueToStringGenerated(this.AuthenticationMode) + `,`,
+ `Addon:` + strings.Replace(strings.Replace(this.Addon.String(), "Addon", "Addon", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubernetesInfo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KubernetesInfo{`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KubernetesSettings) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForVersions := "[]ExpirableVersion{"
+ for _, f := range this.Versions {
+ repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "ExpirableVersion", "ExpirableVersion", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVersions += "}"
+ s := strings.Join([]string{`&KubernetesSettings{`,
+ `Versions:` + repeatedStringForVersions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LastError) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LastError{`,
+ `Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+ `TaskID:` + valueToStringGenerated(this.TaskID) + `,`,
+ `Codes:` + fmt.Sprintf("%v", this.Codes) + `,`,
+ `LastUpdateTime:` + strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LastOperation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LastOperation{`,
+ `Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+ `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`,
+ `Progress:` + fmt.Sprintf("%v", this.Progress) + `,`,
+ `State:` + fmt.Sprintf("%v", this.State) + `,`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Machine) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Machine{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Image:` + strings.Replace(this.Image.String(), "ShootMachineImage", "ShootMachineImage", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MachineControllerManagerSettings) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MachineControllerManagerSettings{`,
+ `MachineDrainTimeout:` + strings.Replace(fmt.Sprintf("%v", this.MachineDrainTimeout), "Duration", "v11.Duration", 1) + `,`,
+ `MachineHealthTimeout:` + strings.Replace(fmt.Sprintf("%v", this.MachineHealthTimeout), "Duration", "v11.Duration", 1) + `,`,
+ `MachineCreationTimeout:` + strings.Replace(fmt.Sprintf("%v", this.MachineCreationTimeout), "Duration", "v11.Duration", 1) + `,`,
+ `MaxEvictRetries:` + valueToStringGenerated(this.MaxEvictRetries) + `,`,
+ `NodeConditions:` + fmt.Sprintf("%v", this.NodeConditions) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MachineImage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForVersions := "[]MachineImageVersion{"
+ for _, f := range this.Versions {
+ repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "MachineImageVersion", "MachineImageVersion", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVersions += "}"
+ s := strings.Join([]string{`&MachineImage{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Versions:` + repeatedStringForVersions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MachineImageVersion) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForCRI := "[]CRI{"
+ for _, f := range this.CRI {
+ repeatedStringForCRI += strings.Replace(strings.Replace(f.String(), "CRI", "CRI", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForCRI += "}"
+ s := strings.Join([]string{`&MachineImageVersion{`,
+ `ExpirableVersion:` + strings.Replace(strings.Replace(this.ExpirableVersion.String(), "ExpirableVersion", "ExpirableVersion", 1), `&`, ``, 1) + `,`,
+ `CRI:` + repeatedStringForCRI + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MachineType) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MachineType{`,
+ `CPU:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CPU), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+ `GPU:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.GPU), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+ `Memory:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Memory), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Storage:` + strings.Replace(this.Storage.String(), "MachineTypeStorage", "MachineTypeStorage", 1) + `,`,
+ `Usable:` + valueToStringGenerated(this.Usable) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MachineTypeStorage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MachineTypeStorage{`,
+ `Class:` + fmt.Sprintf("%v", this.Class) + `,`,
+ `StorageSize:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StorageSize), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Maintenance) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Maintenance{`,
+ `AutoUpdate:` + strings.Replace(this.AutoUpdate.String(), "MaintenanceAutoUpdate", "MaintenanceAutoUpdate", 1) + `,`,
+ `TimeWindow:` + strings.Replace(this.TimeWindow.String(), "MaintenanceTimeWindow", "MaintenanceTimeWindow", 1) + `,`,
+ `ConfineSpecUpdateRollout:` + valueToStringGenerated(this.ConfineSpecUpdateRollout) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MaintenanceAutoUpdate) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MaintenanceAutoUpdate{`,
+ `KubernetesVersion:` + fmt.Sprintf("%v", this.KubernetesVersion) + `,`,
+ `MachineImageVersion:` + fmt.Sprintf("%v", this.MachineImageVersion) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MaintenanceTimeWindow) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MaintenanceTimeWindow{`,
+ `Begin:` + fmt.Sprintf("%v", this.Begin) + `,`,
+ `End:` + fmt.Sprintf("%v", this.End) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Monitoring) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Monitoring{`,
+ `Alerting:` + strings.Replace(this.Alerting.String(), "Alerting", "Alerting", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NamedResourceReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NamedResourceReference{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `ResourceRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRef), "CrossVersionObjectReference", "v12.CrossVersionObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Networking) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Networking{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Pods:` + valueToStringGenerated(this.Pods) + `,`,
+ `Nodes:` + valueToStringGenerated(this.Nodes) + `,`,
+ `Services:` + valueToStringGenerated(this.Services) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NginxIngress) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForConfig := make([]string, 0, len(this.Config))
+ for k := range this.Config {
+ keysForConfig = append(keysForConfig, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForConfig)
+ mapStringForConfig := "map[string]string{"
+ for _, k := range keysForConfig {
+ mapStringForConfig += fmt.Sprintf("%v: %v,", k, this.Config[k])
+ }
+ mapStringForConfig += "}"
+ s := strings.Join([]string{`&NginxIngress{`,
+ `Addon:` + strings.Replace(strings.Replace(this.Addon.String(), "Addon", "Addon", 1), `&`, ``, 1) + `,`,
+ `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`,
+ `Config:` + mapStringForConfig + `,`,
+ `ExternalTrafficPolicy:` + valueToStringGenerated(this.ExternalTrafficPolicy) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OIDCConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForRequiredClaims := make([]string, 0, len(this.RequiredClaims))
+ for k := range this.RequiredClaims {
+ keysForRequiredClaims = append(keysForRequiredClaims, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForRequiredClaims)
+ mapStringForRequiredClaims := "map[string]string{"
+ for _, k := range keysForRequiredClaims {
+ mapStringForRequiredClaims += fmt.Sprintf("%v: %v,", k, this.RequiredClaims[k])
+ }
+ mapStringForRequiredClaims += "}"
+ s := strings.Join([]string{`&OIDCConfig{`,
+ `CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
+ `ClientAuthentication:` + strings.Replace(this.ClientAuthentication.String(), "OpenIDConnectClientAuthentication", "OpenIDConnectClientAuthentication", 1) + `,`,
+ `ClientID:` + valueToStringGenerated(this.ClientID) + `,`,
+ `GroupsClaim:` + valueToStringGenerated(this.GroupsClaim) + `,`,
+ `GroupsPrefix:` + valueToStringGenerated(this.GroupsPrefix) + `,`,
+ `IssuerURL:` + valueToStringGenerated(this.IssuerURL) + `,`,
+ `RequiredClaims:` + mapStringForRequiredClaims + `,`,
+ `SigningAlgs:` + fmt.Sprintf("%v", this.SigningAlgs) + `,`,
+ `UsernameClaim:` + valueToStringGenerated(this.UsernameClaim) + `,`,
+ `UsernamePrefix:` + valueToStringGenerated(this.UsernamePrefix) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OpenIDConnectClientAuthentication) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForExtraConfig := make([]string, 0, len(this.ExtraConfig))
+ for k := range this.ExtraConfig {
+ keysForExtraConfig = append(keysForExtraConfig, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForExtraConfig)
+ mapStringForExtraConfig := "map[string]string{"
+ for _, k := range keysForExtraConfig {
+ mapStringForExtraConfig += fmt.Sprintf("%v: %v,", k, this.ExtraConfig[k])
+ }
+ mapStringForExtraConfig += "}"
+ s := strings.Join([]string{`&OpenIDConnectClientAuthentication{`,
+ `ExtraConfig:` + mapStringForExtraConfig + `,`,
+ `Secret:` + valueToStringGenerated(this.Secret) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Plant) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Plant{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PlantSpec", "PlantSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PlantStatus", "PlantStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PlantList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Plant{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Plant", "Plant", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&PlantList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PlantSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEndpoints := "[]Endpoint{"
+ for _, f := range this.Endpoints {
+ repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForEndpoints += "}"
+ s := strings.Join([]string{`&PlantSpec{`,
+ `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "v1.LocalObjectReference", 1), `&`, ``, 1) + `,`,
+ `Endpoints:` + repeatedStringForEndpoints + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PlantStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&PlantStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`,
+ `ClusterInfo:` + strings.Replace(this.ClusterInfo.String(), "ClusterInfo", "ClusterInfo", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Project) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Project{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ProjectSpec", "ProjectSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ProjectStatus", "ProjectStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Project{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Project", "Project", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ProjectList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectMember) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ProjectMember{`,
+ `Subject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subject), "Subject", "v13.Subject", 1), `&`, ``, 1) + `,`,
+ `Role:` + fmt.Sprintf("%v", this.Role) + `,`,
+ `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForMembers := "[]ProjectMember{"
+ for _, f := range this.Members {
+ repeatedStringForMembers += strings.Replace(strings.Replace(f.String(), "ProjectMember", "ProjectMember", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMembers += "}"
+ s := strings.Join([]string{`&ProjectSpec{`,
+ `CreatedBy:` + strings.Replace(fmt.Sprintf("%v", this.CreatedBy), "Subject", "v13.Subject", 1) + `,`,
+ `Description:` + valueToStringGenerated(this.Description) + `,`,
+ `Owner:` + strings.Replace(fmt.Sprintf("%v", this.Owner), "Subject", "v13.Subject", 1) + `,`,
+ `Purpose:` + valueToStringGenerated(this.Purpose) + `,`,
+ `Members:` + repeatedStringForMembers + `,`,
+ `Namespace:` + valueToStringGenerated(this.Namespace) + `,`,
+ `Tolerations:` + strings.Replace(this.Tolerations.String(), "ProjectTolerations", "ProjectTolerations", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ProjectStatus{`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
+ `StaleSinceTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StaleSinceTimestamp), "Time", "v11.Time", 1) + `,`,
+ `StaleAutoDeleteTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StaleAutoDeleteTimestamp), "Time", "v11.Time", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectTolerations) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForDefaults := "[]Toleration{"
+ for _, f := range this.Defaults {
+ repeatedStringForDefaults += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForDefaults += "}"
+ repeatedStringForWhitelist := "[]Toleration{"
+ for _, f := range this.Whitelist {
+ repeatedStringForWhitelist += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForWhitelist += "}"
+ s := strings.Join([]string{`&ProjectTolerations{`,
+ `Defaults:` + repeatedStringForDefaults + `,`,
+ `Whitelist:` + repeatedStringForWhitelist + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Provider) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForWorkers := "[]Worker{"
+ for _, f := range this.Workers {
+ repeatedStringForWorkers += strings.Replace(strings.Replace(f.String(), "Worker", "Worker", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForWorkers += "}"
+ s := strings.Join([]string{`&Provider{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ControlPlaneConfig:` + strings.Replace(fmt.Sprintf("%v", this.ControlPlaneConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `InfrastructureConfig:` + strings.Replace(fmt.Sprintf("%v", this.InfrastructureConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Workers:` + repeatedStringForWorkers + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Quota) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Quota{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "QuotaSpec", "QuotaSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *QuotaList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Quota{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Quota", "Quota", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&QuotaList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *QuotaSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForMetrics := make([]string, 0, len(this.Metrics))
+ for k := range this.Metrics {
+ keysForMetrics = append(keysForMetrics, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForMetrics)
+ mapStringForMetrics := "k8s_io_api_core_v1.ResourceList{"
+ for _, k := range keysForMetrics {
+ mapStringForMetrics += fmt.Sprintf("%v: %v,", k, this.Metrics[k8s_io_api_core_v1.ResourceName(k)])
+ }
+ mapStringForMetrics += "}"
+ s := strings.Join([]string{`&QuotaSpec{`,
+ `ClusterLifetimeDays:` + valueToStringGenerated(this.ClusterLifetimeDays) + `,`,
+ `Metrics:` + mapStringForMetrics + `,`,
+ `Scope:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Scope), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Region) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForZones := "[]AvailabilityZone{"
+ for _, f := range this.Zones {
+ repeatedStringForZones += strings.Replace(strings.Replace(f.String(), "AvailabilityZone", "AvailabilityZone", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForZones += "}"
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&Region{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Zones:` + repeatedStringForZones + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceWatchCacheSize) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceWatchCacheSize{`,
+ `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`,
+ `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
+ `CacheSize:` + fmt.Sprintf("%v", this.CacheSize) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SecretBinding) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForQuotas := "[]ObjectReference{"
+ for _, f := range this.Quotas {
+ repeatedStringForQuotas += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForQuotas += "}"
+ s := strings.Join([]string{`&SecretBinding{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`,
+ `Quotas:` + repeatedStringForQuotas + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SecretBindingList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]SecretBinding{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "SecretBinding", "SecretBinding", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&SecretBindingList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Seed) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Seed{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SeedSpec", "SeedSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SeedStatus", "SeedStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedBackup) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedBackup{`,
+ `Provider:` + fmt.Sprintf("%v", this.Provider) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Region:` + valueToStringGenerated(this.Region) + `,`,
+ `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedDNS) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedDNS{`,
+ `IngressDomain:` + valueToStringGenerated(this.IngressDomain) + `,`,
+ `Provider:` + strings.Replace(this.Provider.String(), "SeedDNSProvider", "SeedDNSProvider", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedDNSProvider) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedDNSProvider{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`,
+ `Domains:` + strings.Replace(this.Domains.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`,
+ `Zones:` + strings.Replace(this.Zones.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Seed{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Seed", "Seed", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&SeedList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedNetworks) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedNetworks{`,
+ `Nodes:` + valueToStringGenerated(this.Nodes) + `,`,
+ `Pods:` + fmt.Sprintf("%v", this.Pods) + `,`,
+ `Services:` + fmt.Sprintf("%v", this.Services) + `,`,
+ `ShootDefaults:` + strings.Replace(this.ShootDefaults.String(), "ShootNetworks", "ShootNetworks", 1) + `,`,
+ `BlockCIDRs:` + fmt.Sprintf("%v", this.BlockCIDRs) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedProvider) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedProvider{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Region:` + fmt.Sprintf("%v", this.Region) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSelector) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedSelector{`,
+ `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`,
+ `ProviderTypes:` + fmt.Sprintf("%v", this.ProviderTypes) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSettingExcessCapacityReservation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedSettingExcessCapacityReservation{`,
+ `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSettingLoadBalancerServices) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForAnnotations := make([]string, 0, len(this.Annotations))
+ for k := range this.Annotations {
+ keysForAnnotations = append(keysForAnnotations, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ mapStringForAnnotations := "map[string]string{"
+ for _, k := range keysForAnnotations {
+ mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+ }
+ mapStringForAnnotations += "}"
+ s := strings.Join([]string{`&SeedSettingLoadBalancerServices{`,
+ `Annotations:` + mapStringForAnnotations + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSettingScheduling) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedSettingScheduling{`,
+ `Visible:` + fmt.Sprintf("%v", this.Visible) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSettingShootDNS) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedSettingShootDNS{`,
+ `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSettingVerticalPodAutoscaler) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedSettingVerticalPodAutoscaler{`,
+ `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSettings) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedSettings{`,
+ `ExcessCapacityReservation:` + strings.Replace(this.ExcessCapacityReservation.String(), "SeedSettingExcessCapacityReservation", "SeedSettingExcessCapacityReservation", 1) + `,`,
+ `Scheduling:` + strings.Replace(this.Scheduling.String(), "SeedSettingScheduling", "SeedSettingScheduling", 1) + `,`,
+ `ShootDNS:` + strings.Replace(this.ShootDNS.String(), "SeedSettingShootDNS", "SeedSettingShootDNS", 1) + `,`,
+ `LoadBalancerServices:` + strings.Replace(this.LoadBalancerServices.String(), "SeedSettingLoadBalancerServices", "SeedSettingLoadBalancerServices", 1) + `,`,
+ `VerticalPodAutoscaler:` + strings.Replace(this.VerticalPodAutoscaler.String(), "SeedSettingVerticalPodAutoscaler", "SeedSettingVerticalPodAutoscaler", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForTaints := "[]SeedTaint{"
+ for _, f := range this.Taints {
+ repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "SeedTaint", "SeedTaint", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTaints += "}"
+ s := strings.Join([]string{`&SeedSpec{`,
+ `Backup:` + strings.Replace(this.Backup.String(), "SeedBackup", "SeedBackup", 1) + `,`,
+ `DNS:` + strings.Replace(strings.Replace(this.DNS.String(), "SeedDNS", "SeedDNS", 1), `&`, ``, 1) + `,`,
+ `Networks:` + strings.Replace(strings.Replace(this.Networks.String(), "SeedNetworks", "SeedNetworks", 1), `&`, ``, 1) + `,`,
+ `Provider:` + strings.Replace(strings.Replace(this.Provider.String(), "SeedProvider", "SeedProvider", 1), `&`, ``, 1) + `,`,
+ `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1) + `,`,
+ `Taints:` + repeatedStringForTaints + `,`,
+ `Volume:` + strings.Replace(this.Volume.String(), "SeedVolume", "SeedVolume", 1) + `,`,
+ `Settings:` + strings.Replace(this.Settings.String(), "SeedSettings", "SeedSettings", 1) + `,`,
+ `Ingress:` + strings.Replace(this.Ingress.String(), "Ingress", "Ingress", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ keysForCapacity := make([]string, 0, len(this.Capacity))
+ for k := range this.Capacity {
+ keysForCapacity = append(keysForCapacity, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ mapStringForCapacity := "k8s_io_api_core_v1.ResourceList{"
+ for _, k := range keysForCapacity {
+ mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[k8s_io_api_core_v1.ResourceName(k)])
+ }
+ mapStringForCapacity += "}"
+ keysForAllocatable := make([]string, 0, len(this.Allocatable))
+ for k := range this.Allocatable {
+ keysForAllocatable = append(keysForAllocatable, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable)
+ mapStringForAllocatable := "k8s_io_api_core_v1.ResourceList{"
+ for _, k := range keysForAllocatable {
+ mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[k8s_io_api_core_v1.ResourceName(k)])
+ }
+ mapStringForAllocatable += "}"
+ s := strings.Join([]string{`&SeedStatus{`,
+ `Gardener:` + strings.Replace(this.Gardener.String(), "Gardener", "Gardener", 1) + `,`,
+ `KubernetesVersion:` + valueToStringGenerated(this.KubernetesVersion) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `ClusterIdentity:` + valueToStringGenerated(this.ClusterIdentity) + `,`,
+ `Capacity:` + mapStringForCapacity + `,`,
+ `Allocatable:` + mapStringForAllocatable + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedTaint) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedTaint{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Value:` + valueToStringGenerated(this.Value) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedVolume) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForProviders := "[]SeedVolumeProvider{"
+ for _, f := range this.Providers {
+ repeatedStringForProviders += strings.Replace(strings.Replace(f.String(), "SeedVolumeProvider", "SeedVolumeProvider", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForProviders += "}"
+ s := strings.Join([]string{`&SeedVolume{`,
+ `MinimumSize:` + strings.Replace(fmt.Sprintf("%v", this.MinimumSize), "Quantity", "resource.Quantity", 1) + `,`,
+ `Providers:` + repeatedStringForProviders + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SeedVolumeProvider) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SeedVolumeProvider{`,
+ `Purpose:` + fmt.Sprintf("%v", this.Purpose) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceAccountConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceAccountConfig{`,
+ `Issuer:` + valueToStringGenerated(this.Issuer) + `,`,
+ `SigningKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.SigningKeySecret), "LocalObjectReference", "v1.LocalObjectReference", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Shoot) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Shoot{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ShootSpec", "ShootSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ShootStatus", "ShootStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Shoot{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Shoot", "Shoot", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ShootList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootMachineImage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ShootMachineImage{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Version:` + valueToStringGenerated(this.Version) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootNetworks) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ShootNetworks{`,
+ `Pods:` + valueToStringGenerated(this.Pods) + `,`,
+ `Services:` + valueToStringGenerated(this.Services) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForExtensions := "[]Extension{"
+ for _, f := range this.Extensions {
+ repeatedStringForExtensions += strings.Replace(strings.Replace(f.String(), "Extension", "Extension", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForExtensions += "}"
+ repeatedStringForResources := "[]NamedResourceReference{"
+ for _, f := range this.Resources {
+ repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "NamedResourceReference", "NamedResourceReference", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResources += "}"
+ repeatedStringForTolerations := "[]Toleration{"
+ for _, f := range this.Tolerations {
+ repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTolerations += "}"
+ s := strings.Join([]string{`&ShootSpec{`,
+ `Addons:` + strings.Replace(this.Addons.String(), "Addons", "Addons", 1) + `,`,
+ `CloudProfileName:` + fmt.Sprintf("%v", this.CloudProfileName) + `,`,
+ `DNS:` + strings.Replace(this.DNS.String(), "DNS", "DNS", 1) + `,`,
+ `Extensions:` + repeatedStringForExtensions + `,`,
+ `Hibernation:` + strings.Replace(this.Hibernation.String(), "Hibernation", "Hibernation", 1) + `,`,
+ `Kubernetes:` + strings.Replace(strings.Replace(this.Kubernetes.String(), "Kubernetes", "Kubernetes", 1), `&`, ``, 1) + `,`,
+ `Networking:` + strings.Replace(strings.Replace(this.Networking.String(), "Networking", "Networking", 1), `&`, ``, 1) + `,`,
+ `Maintenance:` + strings.Replace(this.Maintenance.String(), "Maintenance", "Maintenance", 1) + `,`,
+ `Monitoring:` + strings.Replace(this.Monitoring.String(), "Monitoring", "Monitoring", 1) + `,`,
+ `Provider:` + strings.Replace(strings.Replace(this.Provider.String(), "Provider", "Provider", 1), `&`, ``, 1) + `,`,
+ `Purpose:` + valueToStringGenerated(this.Purpose) + `,`,
+ `Region:` + fmt.Sprintf("%v", this.Region) + `,`,
+ `SecretBindingName:` + fmt.Sprintf("%v", this.SecretBindingName) + `,`,
+ `SeedName:` + valueToStringGenerated(this.SeedName) + `,`,
+ `SeedSelector:` + strings.Replace(this.SeedSelector.String(), "SeedSelector", "SeedSelector", 1) + `,`,
+ `Resources:` + repeatedStringForResources + `,`,
+ `Tolerations:` + repeatedStringForTolerations + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ShootStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ repeatedStringForConstraints := "[]Condition{"
+ for _, f := range this.Constraints {
+ repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConstraints += "}"
+ repeatedStringForLastErrors := "[]LastError{"
+ for _, f := range this.LastErrors {
+ repeatedStringForLastErrors += strings.Replace(strings.Replace(f.String(), "LastError", "LastError", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForLastErrors += "}"
+ s := strings.Join([]string{`&ShootStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `Constraints:` + repeatedStringForConstraints + `,`,
+ `Gardener:` + strings.Replace(strings.Replace(this.Gardener.String(), "Gardener", "Gardener", 1), `&`, ``, 1) + `,`,
+ `IsHibernated:` + fmt.Sprintf("%v", this.IsHibernated) + `,`,
+ `LastOperation:` + strings.Replace(this.LastOperation.String(), "LastOperation", "LastOperation", 1) + `,`,
+ `LastErrors:` + repeatedStringForLastErrors + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `RetryCycleStartTime:` + strings.Replace(fmt.Sprintf("%v", this.RetryCycleStartTime), "Time", "v11.Time", 1) + `,`,
+ `SeedName:` + valueToStringGenerated(this.SeedName) + `,`,
+ `TechnicalID:` + fmt.Sprintf("%v", this.TechnicalID) + `,`,
+ `UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+ `ClusterIdentity:` + valueToStringGenerated(this.ClusterIdentity) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Toleration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Toleration{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Value:` + valueToStringGenerated(this.Value) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *VerticalPodAutoscaler) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&VerticalPodAutoscaler{`,
+ `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`,
+ `EvictAfterOOMThreshold:` + strings.Replace(fmt.Sprintf("%v", this.EvictAfterOOMThreshold), "Duration", "v11.Duration", 1) + `,`,
+ `EvictionRateBurst:` + valueToStringGenerated(this.EvictionRateBurst) + `,`,
+ `EvictionRateLimit:` + valueToStringGenerated(this.EvictionRateLimit) + `,`,
+ `EvictionTolerance:` + valueToStringGenerated(this.EvictionTolerance) + `,`,
+ `RecommendationMarginFraction:` + valueToStringGenerated(this.RecommendationMarginFraction) + `,`,
+ `UpdaterInterval:` + strings.Replace(fmt.Sprintf("%v", this.UpdaterInterval), "Duration", "v11.Duration", 1) + `,`,
+ `RecommenderInterval:` + strings.Replace(fmt.Sprintf("%v", this.RecommenderInterval), "Duration", "v11.Duration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Volume) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Volume{`,
+ `Name:` + valueToStringGenerated(this.Name) + `,`,
+ `Type:` + valueToStringGenerated(this.Type) + `,`,
+ `VolumeSize:` + fmt.Sprintf("%v", this.VolumeSize) + `,`,
+ `Encrypted:` + valueToStringGenerated(this.Encrypted) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *VolumeType) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&VolumeType{`,
+ `Class:` + fmt.Sprintf("%v", this.Class) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Usable:` + valueToStringGenerated(this.Usable) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WatchCacheSizes) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForResources := "[]ResourceWatchCacheSize{"
+ for _, f := range this.Resources {
+ repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceWatchCacheSize", "ResourceWatchCacheSize", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResources += "}"
+ s := strings.Join([]string{`&WatchCacheSizes{`,
+ `Default:` + valueToStringGenerated(this.Default) + `,`,
+ `Resources:` + repeatedStringForResources + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Worker) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForTaints := "[]Taint{"
+ for _, f := range this.Taints {
+ repeatedStringForTaints += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForTaints += "}"
+ repeatedStringForDataVolumes := "[]DataVolume{"
+ for _, f := range this.DataVolumes {
+ repeatedStringForDataVolumes += strings.Replace(strings.Replace(f.String(), "DataVolume", "DataVolume", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForDataVolumes += "}"
+ keysForAnnotations := make([]string, 0, len(this.Annotations))
+ for k := range this.Annotations {
+ keysForAnnotations = append(keysForAnnotations, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ mapStringForAnnotations := "map[string]string{"
+ for _, k := range keysForAnnotations {
+ mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+ }
+ mapStringForAnnotations += "}"
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ s := strings.Join([]string{`&Worker{`,
+ `Annotations:` + mapStringForAnnotations + `,`,
+ `CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
+ `CRI:` + strings.Replace(this.CRI.String(), "CRI", "CRI", 1) + `,`,
+ `Kubernetes:` + strings.Replace(this.Kubernetes.String(), "WorkerKubernetes", "WorkerKubernetes", 1) + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Machine:` + strings.Replace(strings.Replace(this.Machine.String(), "Machine", "Machine", 1), `&`, ``, 1) + `,`,
+ `Maximum:` + fmt.Sprintf("%v", this.Maximum) + `,`,
+ `Minimum:` + fmt.Sprintf("%v", this.Minimum) + `,`,
+ `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`,
+ `Taints:` + repeatedStringForTaints + `,`,
+ `Volume:` + strings.Replace(this.Volume.String(), "Volume", "Volume", 1) + `,`,
+ `DataVolumes:` + repeatedStringForDataVolumes + `,`,
+ `KubeletDataVolumeName:` + valueToStringGenerated(this.KubeletDataVolumeName) + `,`,
+ `Zones:` + fmt.Sprintf("%v", this.Zones) + `,`,
+ `SystemComponents:` + strings.Replace(this.SystemComponents.String(), "WorkerSystemComponents", "WorkerSystemComponents", 1) + `,`,
+ `MachineControllerManagerSettings:` + strings.Replace(this.MachineControllerManagerSettings.String(), "MachineControllerManagerSettings", "MachineControllerManagerSettings", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WorkerKubernetes) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&WorkerKubernetes{`,
+ `Kubelet:` + strings.Replace(this.Kubelet.String(), "KubeletConfig", "KubeletConfig", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WorkerSystemComponents) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&WorkerSystemComponents{`,
+ `Allow:` + fmt.Sprintf("%v", this.Allow) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *Addon) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Addon: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Addon: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Addons) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Addons: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Addons: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesDashboard", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.KubernetesDashboard == nil {
+ m.KubernetesDashboard = &KubernetesDashboard{}
+ }
+ if err := m.KubernetesDashboard.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NginxIngress", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NginxIngress == nil {
+ m.NginxIngress = &NginxIngress{}
+ }
+ if err := m.NginxIngress.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AdmissionPlugin) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AdmissionPlugin: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AdmissionPlugin: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Config == nil {
+ m.Config = &runtime.RawExtension{}
+ }
+ if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Alerting) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Alerting: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Alerting: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EmailReceivers", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EmailReceivers = append(m.EmailReceivers, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuditConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuditConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuditConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuditPolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuditPolicy == nil {
+ m.AuditPolicy = &AuditPolicy{}
+ }
+ if err := m.AuditPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuditPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuditPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuditPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConfigMapRef == nil {
+ m.ConfigMapRef = &v1.ObjectReference{}
+ }
+ if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AvailabilityZone) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AvailabilityZone: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AvailabilityZone: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnavailableMachineTypes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UnavailableMachineTypes = append(m.UnavailableMachineTypes, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnavailableVolumeTypes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UnavailableVolumeTypes = append(m.UnavailableVolumeTypes, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupBucket) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupBucket: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupBucket: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupBucketList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupBucketList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupBucketList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, BackupBucket{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupBucketProvider) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupBucketProvider: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupBucketProvider: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Region = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupBucketSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupBucketSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupBucketSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.SeedName = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupBucketStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupBucketStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupBucketStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderStatus", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderStatus == nil {
+ m.ProviderStatus = &runtime.RawExtension{}
+ }
+ if err := m.ProviderStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastOperation", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastOperation == nil {
+ m.LastOperation = &LastOperation{}
+ }
+ if err := m.LastOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastError", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastError == nil {
+ m.LastError = &LastError{}
+ }
+ if err := m.LastError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GeneratedSecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.GeneratedSecretRef == nil {
+ m.GeneratedSecretRef = &v1.SecretReference{}
+ }
+ if err := m.GeneratedSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupEntry) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupEntry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupEntryList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupEntryList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupEntryList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, BackupEntry{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupEntrySpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupEntrySpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupEntrySpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BucketName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BucketName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.SeedName = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupEntryStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupEntryStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupEntryStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastOperation", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastOperation == nil {
+ m.LastOperation = &LastOperation{}
+ }
+ if err := m.LastOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastError", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastError == nil {
+ m.LastError = &LastError{}
+ }
+ if err := m.LastError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CRI) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CRI: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CRI: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = CRIName(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContainerRuntimes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContainerRuntimes = append(m.ContainerRuntimes, ContainerRuntime{})
+ if err := m.ContainerRuntimes[len(m.ContainerRuntimes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CloudInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CloudInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CloudInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Region = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CloudProfile) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CloudProfile: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CloudProfile: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CloudProfileList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CloudProfileList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CloudProfileList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, CloudProfile{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CloudProfileSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CloudProfileSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CloudProfileSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.CABundle = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineImages", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MachineImages = append(m.MachineImages, MachineImage{})
+ if err := m.MachineImages[len(m.MachineImages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineTypes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MachineTypes = append(m.MachineTypes, MachineType{})
+ if err := m.MachineTypes[len(m.MachineTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Regions = append(m.Regions, Region{})
+ if err := m.Regions[len(m.Regions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SeedSelector == nil {
+ m.SeedSelector = &SeedSelector{}
+ }
+ if err := m.SeedSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeTypes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeTypes = append(m.VolumeTypes, VolumeType{})
+ if err := m.VolumeTypes[len(m.VolumeTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterAutoscaler) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterAutoscaler: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelayAfterAdd", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ScaleDownDelayAfterAdd == nil {
+ m.ScaleDownDelayAfterAdd = &v11.Duration{}
+ }
+ if err := m.ScaleDownDelayAfterAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelayAfterDelete", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ScaleDownDelayAfterDelete == nil {
+ m.ScaleDownDelayAfterDelete = &v11.Duration{}
+ }
+ if err := m.ScaleDownDelayAfterDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelayAfterFailure", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ScaleDownDelayAfterFailure == nil {
+ m.ScaleDownDelayAfterFailure = &v11.Duration{}
+ }
+ if err := m.ScaleDownDelayAfterFailure.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownUnneededTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ScaleDownUnneededTime == nil {
+ m.ScaleDownUnneededTime = &v11.Duration{}
+ }
+ if err := m.ScaleDownUnneededTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownUtilizationThreshold", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.ScaleDownUtilizationThreshold = &v2
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScanInterval", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ScanInterval == nil {
+ m.ScanInterval = &v11.Duration{}
+ }
+ if err := m.ScanInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cloud", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Cloud.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Condition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Condition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Condition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = ConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Codes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Codes = append(m.Codes, ErrorCode(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ContainerRuntime) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ContainerRuntime: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ContainerRuntime: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerDeployment) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerDeployment: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerDeployment: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ControllerDeploymentPolicy(dAtA[iNdEx:postIndex])
+ m.Policy = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SeedSelector == nil {
+ m.SeedSelector = &v11.LabelSelector{}
+ }
+ if err := m.SeedSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerInstallation) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerInstallation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerInstallation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerInstallationList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerInstallationList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerInstallationList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ControllerInstallation{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerInstallationSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerInstallationSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerInstallationSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RegistrationRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.RegistrationRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SeedRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerInstallationStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerInstallationStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerInstallationStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderStatus", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderStatus == nil {
+ m.ProviderStatus = &runtime.RawExtension{}
+ }
+ if err := m.ProviderStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerRegistration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerRegistration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerRegistration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerRegistrationList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerRegistrationList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerRegistrationList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ControllerRegistration{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerRegistrationSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerRegistrationSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerRegistrationSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resources = append(m.Resources, ControllerResource{})
+ if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Deployment == nil {
+ m.Deployment = &ControllerDeployment{}
+ }
+ if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ControllerResource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ControllerResource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ControllerResource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GloballyEnabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.GloballyEnabled = &b
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReconcileTimeout", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ReconcileTimeout == nil {
+ m.ReconcileTimeout = &v11.Duration{}
+ }
+ if err := m.ReconcileTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Primary = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DNS) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DNS: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DNS: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Domain = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Providers = append(m.Providers, DNSProvider{})
+ if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DNSIncludeExclude) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DNSIncludeExclude: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DNSIncludeExclude: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Include", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Include = append(m.Include, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exclude", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Exclude = append(m.Exclude, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DNSProvider) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DNSProvider: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DNSProvider: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Domains", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Domains == nil {
+ m.Domains = &DNSIncludeExclude{}
+ }
+ if err := m.Domains.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Primary = &b
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.SecretName = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Type = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Zones == nil {
+ m.Zones = &DNSIncludeExclude{}
+ }
+ if err := m.Zones.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DataVolume) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DataVolume: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DataVolume: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Type = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeSize", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeSize = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Encrypted", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Encrypted = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Endpoint) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Endpoint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.URL = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Purpose = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExpirableVersion) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExpirableVersion: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExpirableVersion: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpirationDate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ExpirationDate == nil {
+ m.ExpirationDate = &v11.Time{}
+ }
+ if err := m.ExpirationDate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Classification", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := VersionClassification(dAtA[iNdEx:postIndex])
+ m.Classification = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Extension) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Extension: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Extension: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Disabled = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Gardener) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Gardener: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Gardener: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Hibernation) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Hibernation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Hibernation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Enabled = &b
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Schedules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Schedules = append(m.Schedules, HibernationSchedule{})
+ if err := m.Schedules[len(m.Schedules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HibernationSchedule) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HibernationSchedule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HibernationSchedule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Start = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.End = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Location = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HorizontalPodAutoscalerConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CPUInitializationPeriod", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CPUInitializationPeriod == nil {
+ m.CPUInitializationPeriod = &v11.Duration{}
+ }
+ if err := m.CPUInitializationPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DownscaleDelay", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DownscaleDelay == nil {
+ m.DownscaleDelay = &v11.Duration{}
+ }
+ if err := m.DownscaleDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DownscaleStabilization", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DownscaleStabilization == nil {
+ m.DownscaleStabilization = &v11.Duration{}
+ }
+ if err := m.DownscaleStabilization.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InitialReadinessDelay", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.InitialReadinessDelay == nil {
+ m.InitialReadinessDelay = &v11.Duration{}
+ }
+ if err := m.InitialReadinessDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SyncPeriod", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SyncPeriod == nil {
+ m.SyncPeriod = &v11.Duration{}
+ }
+ if err := m.SyncPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
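+		// Tolerance is encoded as a fixed64 double on the wire (wire type 1):
+		// the next 8 bytes are read little-endian and reinterpreted as a
+		// float64 via math.Float64frombits before being stored as a pointer.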
+ case 6:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tolerance", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.Tolerance = &v2
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpscaleDelay", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.UpscaleDelay == nil {
+ m.UpscaleDelay = &v11.Duration{}
+ }
+ if err := m.UpscaleDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Ingress) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Ingress: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Domain = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Controller.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressController) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressController: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressController: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeAPIServerConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeAPIServerConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AdmissionPlugins", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AdmissionPlugins = append(m.AdmissionPlugins, AdmissionPlugin{})
+ if err := m.AdmissionPlugins[len(m.AdmissionPlugins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIAudiences", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIAudiences = append(m.APIAudiences, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuditConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuditConfig == nil {
+ m.AuditConfig = &AuditConfig{}
+ }
+ if err := m.AuditConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EnableBasicAuthentication", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.EnableBasicAuthentication = &b
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OIDCConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.OIDCConfig == nil {
+ m.OIDCConfig = &OIDCConfig{}
+ }
+ if err := m.OIDCConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RuntimeConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RuntimeConfig == nil {
+ m.RuntimeConfig = make(map[string]bool)
+ }
+ var mapkey string
+ var mapvalue bool
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapvaluetemp int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapvaluetemp |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ mapvalue = bool(mapvaluetemp != 0)
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.RuntimeConfig[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ServiceAccountConfig == nil {
+ m.ServiceAccountConfig = &ServiceAccountConfig{}
+ }
+ if err := m.ServiceAccountConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WatchCacheSizes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.WatchCacheSizes == nil {
+ m.WatchCacheSizes = &WatchCacheSizes{}
+ }
+ if err := m.WatchCacheSizes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Requests == nil {
+ m.Requests = &KubeAPIServerRequests{}
+ }
+ if err := m.Requests.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeAPIServerRequests) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeAPIServerRequests: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeAPIServerRequests: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxNonMutatingInflight", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MaxNonMutatingInflight = &v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxMutatingInflight", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MaxMutatingInflight = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeControllerManagerConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeControllerManagerConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeControllerManagerConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HorizontalPodAutoscalerConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.HorizontalPodAutoscalerConfig == nil {
+ m.HorizontalPodAutoscalerConfig = &HorizontalPodAutoscalerConfig{}
+ }
+ if err := m.HorizontalPodAutoscalerConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeCIDRMaskSize", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.NodeCIDRMaskSize = &v
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodEvictionTimeout", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PodEvictionTimeout == nil {
+ m.PodEvictionTimeout = &v11.Duration{}
+ }
+ if err := m.PodEvictionTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeProxyConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeProxyConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ProxyMode(dAtA[iNdEx:postIndex])
+ m.Mode = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeSchedulerConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeSchedulerConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeSchedulerConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeMaxPDVols", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.KubeMaxPDVols = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeletConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeletConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeletConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CPUCFSQuota", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.CPUCFSQuota = &b
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CPUManagerPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.CPUManagerPolicy = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionHard", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EvictionHard == nil {
+ m.EvictionHard = &KubeletConfigEviction{}
+ }
+ if err := m.EvictionHard.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionMaxPodGracePeriod", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.EvictionMaxPodGracePeriod = &v
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionMinimumReclaim", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EvictionMinimumReclaim == nil {
+ m.EvictionMinimumReclaim = &KubeletConfigEvictionMinimumReclaim{}
+ }
+ if err := m.EvictionMinimumReclaim.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionPressureTransitionPeriod", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EvictionPressureTransitionPeriod == nil {
+ m.EvictionPressureTransitionPeriod = &v11.Duration{}
+ }
+ if err := m.EvictionPressureTransitionPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionSoft", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EvictionSoft == nil {
+ m.EvictionSoft = &KubeletConfigEviction{}
+ }
+ if err := m.EvictionSoft.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionSoftGracePeriod", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EvictionSoftGracePeriod == nil {
+ m.EvictionSoftGracePeriod = &KubeletConfigEvictionSoftGracePeriod{}
+ }
+ if err := m.EvictionSoftGracePeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxPods", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MaxPods = &v
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodPIDsLimit", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PodPIDsLimit = &v
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImagePullProgressDeadline", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ImagePullProgressDeadline == nil {
+ m.ImagePullProgressDeadline = &v11.Duration{}
+ }
+ if err := m.ImagePullProgressDeadline.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 13:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FailSwapOn", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.FailSwapOn = &b
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeReserved", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.KubeReserved == nil {
+ m.KubeReserved = &KubeletConfigReserved{}
+ }
+ if err := m.KubeReserved.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SystemReserved", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SystemReserved == nil {
+ m.SystemReserved = &KubeletConfigReserved{}
+ }
+ if err := m.SystemReserved.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeletConfigEviction) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeletConfigEviction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeletConfigEviction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.MemoryAvailable = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.ImageFSAvailable = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.ImageFSInodesFree = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.NodeFSAvailable = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.NodeFSInodesFree = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeletConfigEvictionMinimumReclaim) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeletConfigEvictionMinimumReclaim: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeletConfigEvictionMinimumReclaim: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MemoryAvailable == nil {
+ m.MemoryAvailable = &resource.Quantity{}
+ }
+ if err := m.MemoryAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ImageFSAvailable == nil {
+ m.ImageFSAvailable = &resource.Quantity{}
+ }
+ if err := m.ImageFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ImageFSInodesFree == nil {
+ m.ImageFSInodesFree = &resource.Quantity{}
+ }
+ if err := m.ImageFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeFSAvailable == nil {
+ m.NodeFSAvailable = &resource.Quantity{}
+ }
+ if err := m.NodeFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeFSInodesFree == nil {
+ m.NodeFSInodesFree = &resource.Quantity{}
+ }
+ if err := m.NodeFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeletConfigEvictionSoftGracePeriod) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeletConfigEvictionSoftGracePeriod: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeletConfigEvictionSoftGracePeriod: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MemoryAvailable == nil {
+ m.MemoryAvailable = &v11.Duration{}
+ }
+ if err := m.MemoryAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ImageFSAvailable == nil {
+ m.ImageFSAvailable = &v11.Duration{}
+ }
+ if err := m.ImageFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ImageFSInodesFree == nil {
+ m.ImageFSInodesFree = &v11.Duration{}
+ }
+ if err := m.ImageFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeFSAvailable == nil {
+ m.NodeFSAvailable = &v11.Duration{}
+ }
+ if err := m.NodeFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeFSInodesFree == nil {
+ m.NodeFSInodesFree = &v11.Duration{}
+ }
+ if err := m.NodeFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubeletConfigReserved) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubeletConfigReserved: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubeletConfigReserved: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CPU == nil {
+ m.CPU = &resource.Quantity{}
+ }
+ if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Memory == nil {
+ m.Memory = &resource.Quantity{}
+ }
+ if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EphemeralStorage", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EphemeralStorage == nil {
+ m.EphemeralStorage = &resource.Quantity{}
+ }
+ if err := m.EphemeralStorage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PID", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PID == nil {
+ m.PID = &resource.Quantity{}
+ }
+ if err := m.PID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Kubernetes) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Kubernetes: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Kubernetes: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegedContainers", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.AllowPrivilegedContainers = &b
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterAutoscaler", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClusterAutoscaler == nil {
+ m.ClusterAutoscaler = &ClusterAutoscaler{}
+ }
+ if err := m.ClusterAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeAPIServer", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.KubeAPIServer == nil {
+ m.KubeAPIServer = &KubeAPIServerConfig{}
+ }
+ if err := m.KubeAPIServer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeControllerManager", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.KubeControllerManager == nil {
+ m.KubeControllerManager = &KubeControllerManagerConfig{}
+ }
+ if err := m.KubeControllerManager.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeScheduler", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.KubeScheduler == nil {
+ m.KubeScheduler = &KubeSchedulerConfig{}
+ }
+ if err := m.KubeScheduler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeProxy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.KubeProxy == nil {
+ m.KubeProxy = &KubeProxyConfig{}
+ }
+ if err := m.KubeProxy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kubelet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kubelet == nil {
+ m.Kubelet = &KubeletConfig{}
+ }
+ if err := m.Kubelet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VerticalPodAutoscaler", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.VerticalPodAutoscaler == nil {
+ m.VerticalPodAutoscaler = &VerticalPodAutoscaler{}
+ }
+ if err := m.VerticalPodAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubernetesConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubernetesConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubernetesConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FeatureGates", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FeatureGates == nil {
+ m.FeatureGates = make(map[string]bool)
+ }
+ var mapkey string
+ var mapvalue bool
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapvaluetemp int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapvaluetemp |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ mapvalue = bool(mapvaluetemp != 0)
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.FeatureGates[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubernetesDashboard) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubernetesDashboard: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubernetesDashboard: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthenticationMode", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.AuthenticationMode = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addon", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Addon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubernetesInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubernetesInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubernetesInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KubernetesSettings) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KubernetesSettings: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KubernetesSettings: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Versions = append(m.Versions, ExpirableVersion{})
+ if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LastError) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LastError: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LastError: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.TaskID = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Codes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Codes = append(m.Codes, ErrorCode(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastUpdateTime == nil {
+ m.LastUpdateTime = &v11.Time{}
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LastOperation) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LastOperation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LastOperation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType)
+ }
+ m.Progress = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Progress |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.State = LastOperationState(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = LastOperationType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Machine) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Machine: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Machine: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Image == nil {
+ m.Image = &ShootMachineImage{}
+ }
+ if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MachineControllerManagerSettings) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MachineControllerManagerSettings: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MachineControllerManagerSettings: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineDrainTimeout", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MachineDrainTimeout == nil {
+ m.MachineDrainTimeout = &v11.Duration{}
+ }
+ if err := m.MachineDrainTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineHealthTimeout", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MachineHealthTimeout == nil {
+ m.MachineHealthTimeout = &v11.Duration{}
+ }
+ if err := m.MachineHealthTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineCreationTimeout", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MachineCreationTimeout == nil {
+ m.MachineCreationTimeout = &v11.Duration{}
+ }
+ if err := m.MachineCreationTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxEvictRetries", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MaxEvictRetries = &v
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeConditions", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeConditions = append(m.NodeConditions, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MachineImage) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MachineImage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MachineImage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Versions = append(m.Versions, MachineImageVersion{})
+ if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MachineImageVersion) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MachineImageVersion: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MachineImageVersion: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpirableVersion", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ExpirableVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CRI", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CRI = append(m.CRI, CRI{})
+ if err := m.CRI[len(m.CRI)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MachineType) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MachineType: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MachineType: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GPU", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.GPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Storage == nil {
+ m.Storage = &MachineTypeStorage{}
+ }
+ if err := m.Storage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Usable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Usable = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MachineTypeStorage) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MachineTypeStorage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MachineTypeStorage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Class", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Class = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StorageSize", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.StorageSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Maintenance) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Maintenance: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Maintenance: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AutoUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AutoUpdate == nil {
+ m.AutoUpdate = &MaintenanceAutoUpdate{}
+ }
+ if err := m.AutoUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeWindow", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TimeWindow == nil {
+ m.TimeWindow = &MaintenanceTimeWindow{}
+ }
+ if err := m.TimeWindow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfineSpecUpdateRollout", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.ConfineSpecUpdateRollout = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MaintenanceAutoUpdate) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MaintenanceAutoUpdate: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MaintenanceAutoUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesVersion", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.KubernetesVersion = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineImageVersion", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MachineImageVersion = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MaintenanceTimeWindow) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MaintenanceTimeWindow: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MaintenanceTimeWindow: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Begin", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Begin = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.End = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Monitoring) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Monitoring: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Monitoring: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alerting", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Alerting == nil {
+ m.Alerting = &Alerting{}
+ }
+ if err := m.Alerting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NamedResourceReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamedResourceReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamedResourceReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ResourceRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Networking) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Networking: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Networking: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Pods = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Nodes = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Services = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NginxIngress) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NginxIngress: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NginxIngress: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addon", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Addon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Config == nil {
+ m.Config = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Config[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := k8s_io_api_core_v1.ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex])
+ m.ExternalTrafficPolicy = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OIDCConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OIDCConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OIDCConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.CABundle = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientAuthentication", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClientAuthentication == nil {
+ m.ClientAuthentication = &OpenIDConnectClientAuthentication{}
+ }
+ if err := m.ClientAuthentication.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.ClientID = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupsClaim", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.GroupsClaim = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupsPrefix", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.GroupsPrefix = &s
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IssuerURL", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.IssuerURL = &s
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequiredClaims", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RequiredClaims == nil {
+ m.RequiredClaims = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.RequiredClaims[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SigningAlgs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SigningAlgs = append(m.SigningAlgs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UsernameClaim", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.UsernameClaim = &s
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UsernamePrefix", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.UsernamePrefix = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OpenIDConnectClientAuthentication: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OpenIDConnectClientAuthentication: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExtraConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ExtraConfig == nil {
+ m.ExtraConfig = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.ExtraConfig[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Secret = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Plant) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Plant: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Plant: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PlantList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PlantList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PlantList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Plant{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PlantSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PlantSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PlantSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Endpoints = append(m.Endpoints, Endpoint{})
+ if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PlantStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PlantStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PlantStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ObservedGeneration = &v
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterInfo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClusterInfo == nil {
+ m.ClusterInfo = &ClusterInfo{}
+ }
+ if err := m.ClusterInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Project) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Project: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Project{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectMember) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectMember: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectMember: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Subject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreatedBy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CreatedBy == nil {
+ m.CreatedBy = &v13.Subject{}
+ }
+ if err := m.CreatedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Description = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Owner == nil {
+ m.Owner = &v13.Subject{}
+ }
+ if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Purpose = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, ProjectMember{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Namespace = &s
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Tolerations == nil {
+ m.Tolerations = &ProjectTolerations{}
+ }
+ if err := m.Tolerations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Phase = ProjectPhase(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StaleSinceTimestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.StaleSinceTimestamp == nil {
+ m.StaleSinceTimestamp = &v11.Time{}
+ }
+ if err := m.StaleSinceTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StaleAutoDeleteTimestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.StaleAutoDeleteTimestamp == nil {
+ m.StaleAutoDeleteTimestamp = &v11.Time{}
+ }
+ if err := m.StaleAutoDeleteTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectTolerations) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectTolerations: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectTolerations: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Defaults", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Defaults = append(m.Defaults, Toleration{})
+ if err := m.Defaults[len(m.Defaults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Whitelist", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Whitelist = append(m.Whitelist, Toleration{})
+ if err := m.Whitelist[len(m.Whitelist)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Provider) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Provider: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ControlPlaneConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ControlPlaneConfig == nil {
+ m.ControlPlaneConfig = &runtime.RawExtension{}
+ }
+ if err := m.ControlPlaneConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InfrastructureConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.InfrastructureConfig == nil {
+ m.InfrastructureConfig = &runtime.RawExtension{}
+ }
+ if err := m.InfrastructureConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Workers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Workers = append(m.Workers, Worker{})
+ if err := m.Workers[len(m.Workers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Quota) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Quota: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Quota: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QuotaList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QuotaList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QuotaList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Quota{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QuotaSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QuotaSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterLifetimeDays", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ClusterLifetimeDays = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Metrics == nil {
+ m.Metrics = make(k8s_io_api_core_v1.ResourceList)
+ }
+ var mapkey k8s_io_api_core_v1.ResourceName
+ mapvalue := &resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Metrics[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Region) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Region: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Region: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Zones = append(m.Zones, AvailabilityZone{})
+ if err := m.Zones[len(m.Zones)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceWatchCacheSize) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceWatchCacheSize: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceWatchCacheSize: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.APIGroup = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resource = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CacheSize", wireType)
+ }
+ m.CacheSize = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CacheSize |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecretBinding) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecretBinding: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecretBinding: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Quotas", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Quotas = append(m.Quotas, v1.ObjectReference{})
+ if err := m.Quotas[len(m.Quotas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecretBindingList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecretBindingList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecretBindingList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, SecretBinding{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Seed) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Seed: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Seed: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedBackup) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedBackup: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedBackup: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Provider = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Region = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedDNS) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedDNS: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedDNS: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IngressDomain", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.IngressDomain = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Provider == nil {
+ m.Provider = &SeedDNSProvider{}
+ }
+ if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedDNSProvider) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedDNSProvider: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedDNSProvider: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Domains", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Domains == nil {
+ m.Domains = &DNSIncludeExclude{}
+ }
+ if err := m.Domains.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Zones == nil {
+ m.Zones = &DNSIncludeExclude{}
+ }
+ if err := m.Zones.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Seed{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedNetworks) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedNetworks: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedNetworks: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Nodes = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Pods = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Services = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ShootDefaults", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ShootDefaults == nil {
+ m.ShootDefaults = &ShootNetworks{}
+ }
+ if err := m.ShootDefaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BlockCIDRs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BlockCIDRs = append(m.BlockCIDRs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedProvider) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedProvider: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedProvider: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Region = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSelector) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LabelSelector == nil {
+ m.LabelSelector = &v11.LabelSelector{}
+ }
+ if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderTypes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProviderTypes = append(m.ProviderTypes, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSettingExcessCapacityReservation) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSettingExcessCapacityReservation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSettingExcessCapacityReservation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSettingLoadBalancerServices) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSettingLoadBalancerServices: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSettingLoadBalancerServices: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Annotations == nil {
+ m.Annotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Annotations[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSettingScheduling) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSettingScheduling: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSettingScheduling: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Visible", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Visible = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSettingShootDNS) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSettingShootDNS: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSettingShootDNS: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSettingVerticalPodAutoscaler) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSettingVerticalPodAutoscaler: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSettingVerticalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSettings) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSettings: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSettings: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExcessCapacityReservation", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ExcessCapacityReservation == nil {
+ m.ExcessCapacityReservation = &SeedSettingExcessCapacityReservation{}
+ }
+ if err := m.ExcessCapacityReservation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Scheduling == nil {
+ m.Scheduling = &SeedSettingScheduling{}
+ }
+ if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ShootDNS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ShootDNS == nil {
+ m.ShootDNS = &SeedSettingShootDNS{}
+ }
+ if err := m.ShootDNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerServices", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LoadBalancerServices == nil {
+ m.LoadBalancerServices = &SeedSettingLoadBalancerServices{}
+ }
+ if err := m.LoadBalancerServices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VerticalPodAutoscaler", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.VerticalPodAutoscaler == nil {
+ m.VerticalPodAutoscaler = &SeedSettingVerticalPodAutoscaler{}
+ }
+ if err := m.VerticalPodAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Backup", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Backup == nil {
+ m.Backup = &SeedBackup{}
+ }
+ if err := m.Backup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DNS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.DNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Networks.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SecretRef == nil {
+ m.SecretRef = &v1.SecretReference{}
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Taints = append(m.Taints, SeedTaint{})
+ if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Volume == nil {
+ m.Volume = &SeedVolume{}
+ }
+ if err := m.Volume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Settings", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Settings == nil {
+ m.Settings = &SeedSettings{}
+ }
+ if err := m.Settings.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Ingress == nil {
+ m.Ingress = &Ingress{}
+ }
+ if err := m.Ingress.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Gardener", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Gardener == nil {
+ m.Gardener = &Gardener{}
+ }
+ if err := m.Gardener.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubernetesVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.KubernetesVersion = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterIdentity", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.ClusterIdentity = &s
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Capacity == nil {
+ m.Capacity = make(k8s_io_api_core_v1.ResourceList)
+ }
+ var mapkey k8s_io_api_core_v1.ResourceName
+ mapvalue := &resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Capacity[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Allocatable == nil {
+ m.Allocatable = make(k8s_io_api_core_v1.ResourceList)
+ }
+ var mapkey k8s_io_api_core_v1.ResourceName
+ mapvalue := &resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Allocatable[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedTaint) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedTaint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedTaint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Value = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedVolume) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedVolume: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedVolume: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinimumSize", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MinimumSize == nil {
+ m.MinimumSize = &resource.Quantity{}
+ }
+ if err := m.MinimumSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Providers = append(m.Providers, SeedVolumeProvider{})
+ if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SeedVolumeProvider) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SeedVolumeProvider: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SeedVolumeProvider: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Purpose = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceAccountConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceAccountConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceAccountConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Issuer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Issuer = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SigningKeySecret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SigningKeySecret == nil {
+ m.SigningKeySecret = &v1.LocalObjectReference{}
+ }
+ if err := m.SigningKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Shoot) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Shoot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Shoot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Shoot{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootMachineImage) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootMachineImage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootMachineImage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Version = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootNetworks) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootNetworks: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootNetworks: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Pods = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Services = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addons", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Addons == nil {
+ m.Addons = &Addons{}
+ }
+ if err := m.Addons.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CloudProfileName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CloudProfileName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DNS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DNS == nil {
+ m.DNS = &DNS{}
+ }
+ if err := m.DNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Extensions = append(m.Extensions, Extension{})
+ if err := m.Extensions[len(m.Extensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hibernation", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Hibernation == nil {
+ m.Hibernation = &Hibernation{}
+ }
+ if err := m.Hibernation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Networking", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Networking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Maintenance", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Maintenance == nil {
+ m.Maintenance = &Maintenance{}
+ }
+ if err := m.Maintenance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Monitoring", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Monitoring == nil {
+ m.Monitoring = &Monitoring{}
+ }
+ if err := m.Monitoring.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ShootPurpose(dAtA[iNdEx:postIndex])
+ m.Purpose = &s
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Region = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretBindingName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SecretBindingName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.SeedName = &s
+ iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SeedSelector == nil {
+ m.SeedSelector = &SeedSelector{}
+ }
+ if err := m.SeedSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 16:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resources = append(m.Resources, NamedResourceReference{})
+ if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 17:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tolerations = append(m.Tolerations, Toleration{})
+ if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ShootStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ShootStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ShootStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Constraints = append(m.Constraints, Condition{})
+ if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Gardener", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Gardener.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IsHibernated", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IsHibernated = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastOperation", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastOperation == nil {
+ m.LastOperation = &LastOperation{}
+ }
+ if err := m.LastOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastErrors", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LastErrors = append(m.LastErrors, LastError{})
+ if err := m.LastErrors[len(m.LastErrors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RetryCycleStartTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RetryCycleStartTime == nil {
+ m.RetryCycleStartTime = &v11.Time{}
+ }
+ if err := m.RetryCycleStartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeedName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.SeedName = &s
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TechnicalID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TechnicalID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterIdentity", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.ClusterIdentity = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Toleration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Toleration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Value = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *VerticalPodAutoscaler) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: VerticalPodAutoscaler: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: VerticalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictAfterOOMThreshold", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EvictAfterOOMThreshold == nil {
+ m.EvictAfterOOMThreshold = &v11.Duration{}
+ }
+ if err := m.EvictAfterOOMThreshold.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionRateBurst", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.EvictionRateBurst = &v
+ case 4:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionRateLimit", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.EvictionRateLimit = &v2
+ case 5:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvictionTolerance", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.EvictionTolerance = &v2
+ case 6:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RecommendationMarginFraction", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.RecommendationMarginFraction = &v2
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdaterInterval", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.UpdaterInterval == nil {
+ m.UpdaterInterval = &v11.Duration{}
+ }
+ if err := m.UpdaterInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RecommenderInterval", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RecommenderInterval == nil {
+ m.RecommenderInterval = &v11.Duration{}
+ }
+ if err := m.RecommenderInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Volume) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Volume: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Name = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Type = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeSize", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeSize = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Encrypted", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Encrypted = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *VolumeType) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: VolumeType: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: VolumeType: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Class", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Class = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Usable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Usable = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchCacheSizes) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchCacheSizes: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchCacheSizes: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Default = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resources = append(m.Resources, ResourceWatchCacheSize{})
+ if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Worker) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Worker: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Worker: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Annotations == nil {
+ m.Annotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Annotations[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.CABundle = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CRI", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CRI == nil {
+ m.CRI = &CRI{}
+ }
+ if err := m.CRI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kubernetes == nil {
+ m.Kubernetes = &WorkerKubernetes{}
+ }
+ if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Machine", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Machine.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Maximum", wireType)
+ }
+ m.Maximum = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Maximum |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Minimum", wireType)
+ }
+ m.Minimum = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Minimum |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxSurge == nil {
+ m.MaxSurge = &intstr.IntOrString{}
+ }
+ if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxUnavailable == nil {
+ m.MaxUnavailable = &intstr.IntOrString{}
+ }
+ if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ProviderConfig == nil {
+ m.ProviderConfig = &runtime.RawExtension{}
+ }
+ if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Taints = append(m.Taints, v1.Taint{})
+ if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Volume == nil {
+ m.Volume = &Volume{}
+ }
+ if err := m.Volume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataVolumes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DataVolumes = append(m.DataVolumes, DataVolume{})
+ if err := m.DataVolumes[len(m.DataVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 16:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeletDataVolumeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.KubeletDataVolumeName = &s
+ iNdEx = postIndex
+ case 17:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Zones = append(m.Zones, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 18:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SystemComponents", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SystemComponents == nil {
+ m.SystemComponents = &WorkerSystemComponents{}
+ }
+ if err := m.SystemComponents.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 19:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineControllerManagerSettings", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MachineControllerManagerSettings == nil {
+ m.MachineControllerManagerSettings = &MachineControllerManagerSettings{}
+ }
+ if err := m.MachineControllerManagerSettings.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WorkerKubernetes) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WorkerKubernetes: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WorkerKubernetes: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kubelet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kubelet == nil {
+ m.Kubelet = &KubeletConfig{}
+ }
+ if err := m.Kubelet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WorkerSystemComponents) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WorkerSystemComponents: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WorkerSystemComponents: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Allow", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Allow = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto
new file mode 100644
index 0000000..ddc222f
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto
@@ -0,0 +1,2267 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package github.com.gardener.gardener.pkg.apis.core.v1beta1;
+
+import "k8s.io/api/autoscaling/v1/generated.proto";
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/api/rbac/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// Addon allows enabling or disabling a specific addon and is used to derive from.
+message Addon {
+ // Enabled indicates whether the addon is enabled or not.
+ optional bool enabled = 1;
+}
+
+// Addons is a collection of configuration for specific addons which are managed by the Gardener.
+message Addons {
+ // KubernetesDashboard holds configuration settings for the kubernetes dashboard addon.
+ // +optional
+ optional KubernetesDashboard kubernetesDashboard = 1;
+
+ // NginxIngress holds configuration settings for the nginx-ingress addon.
+ // +optional
+ optional NginxIngress nginxIngress = 2;
+}
+
+// AdmissionPlugin contains information about a specific admission plugin and its corresponding configuration.
+message AdmissionPlugin {
+ // Name is the name of the plugin.
+ optional string name = 1;
+
+ // Config is the configuration of the plugin.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension config = 2;
+}
+
+// Alerting contains information about how alerting will be done (i.e. who will receive alerts and how).
+message Alerting {
+ // MonitoringEmailReceivers is a list of recipients for alerts
+ // +optional
+ repeated string emailReceivers = 1;
+}
+
+// AuditConfig contains settings for audit of the api server
+message AuditConfig {
+ // AuditPolicy contains configuration settings for audit policy of the kube-apiserver.
+ // +optional
+ optional AuditPolicy auditPolicy = 1;
+}
+
+// AuditPolicy contains audit policy for kube-apiserver
+message AuditPolicy {
+ // ConfigMapRef is a reference to a ConfigMap object in the same namespace,
+ // which contains the audit policy for the kube-apiserver.
+ // +optional
+ optional k8s.io.api.core.v1.ObjectReference configMapRef = 1;
+}
+
+// AvailabilityZone is an availability zone.
+message AvailabilityZone {
+ // Name is an availability zone name.
+ optional string name = 1;
+
+ // UnavailableMachineTypes is a list of machine type names that are not available in this zone.
+ // +optional
+ repeated string unavailableMachineTypes = 2;
+
+ // UnavailableVolumeTypes is a list of volume type names that are not available in this zone.
+ // +optional
+ repeated string unavailableVolumeTypes = 3;
+}
+
+// BackupBucket holds details about backup bucket
+message BackupBucket {
+ // Standard object metadata.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the Backup Bucket.
+ optional BackupBucketSpec spec = 2;
+
+ // Most recently observed status of the Backup Bucket.
+ optional BackupBucketStatus status = 3;
+}
+
+// BackupBucketList is a list of BackupBucket objects.
+message BackupBucketList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of BackupBucket.
+ repeated BackupBucket items = 2;
+}
+
+// BackupBucketProvider holds the details of the cloud provider of the object store.
+message BackupBucketProvider {
+ // Type is the type of provider.
+ optional string type = 1;
+
+ // Region is the region of the bucket.
+ optional string region = 2;
+}
+
+// BackupBucketSpec is the specification of a Backup Bucket.
+message BackupBucketSpec {
+ // Provider holds the details of the cloud provider of the object store.
+ optional BackupBucketProvider provider = 1;
+
+ // ProviderConfig is the configuration passed to BackupBucket resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // SecretRef is a reference to a secret that contains the credentials to access object store.
+ optional k8s.io.api.core.v1.SecretReference secretRef = 3;
+
+ // SeedName holds the name of the seed allocated to BackupBucket for running controller.
+ // +optional
+ optional string seedName = 4;
+}
+
+// BackupBucketStatus holds the most recently observed status of the Backup Bucket.
+message BackupBucketStatus {
+ // ProviderStatus is the configuration passed to BackupBucket resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerStatus = 1;
+
+ // LastOperation holds information about the last operation on the BackupBucket.
+ // +optional
+ optional LastOperation lastOperation = 2;
+
+ // LastError holds information about the last occurred error during an operation.
+ // +optional
+ optional LastError lastError = 3;
+
+ // ObservedGeneration is the most recent generation observed for this BackupBucket. It corresponds to the
+ // BackupBucket's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 4;
+
+ // GeneratedSecretRef is a reference to the secret generated by the backup bucket, which
+ // will have object store specific credentials.
+ // +optional
+ optional k8s.io.api.core.v1.SecretReference generatedSecretRef = 5;
+}
+
+// BackupEntry holds details about shoot backup.
+message BackupEntry {
+ // Standard object metadata.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec contains the specification of the Backup Entry.
+ // +optional
+ optional BackupEntrySpec spec = 2;
+
+ // Status contains the most recently observed status of the Backup Entry.
+ // +optional
+ optional BackupEntryStatus status = 3;
+}
+
+// BackupEntryList is a list of BackupEntry objects.
+message BackupEntryList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of BackupEntry.
+ repeated BackupEntry items = 2;
+}
+
+// BackupEntrySpec is the specification of a Backup Entry.
+message BackupEntrySpec {
+ // BucketName is the name of backup bucket for this Backup Entry.
+ optional string bucketName = 1;
+
+ // SeedName holds the name of the seed allocated to the BackupEntry for running its controller.
+ // +optional
+ optional string seedName = 2;
+}
+
+// BackupEntryStatus holds the most recently observed status of the Backup Entry.
+message BackupEntryStatus {
+ // LastOperation holds information about the last operation on the BackupEntry.
+ // +optional
+ optional LastOperation lastOperation = 1;
+
+ // LastError holds information about the last occurred error during an operation.
+ // +optional
+ optional LastError lastError = 2;
+
+ // ObservedGeneration is the most recent generation observed for this BackupEntry. It corresponds to the
+ // BackupEntry's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 3;
+}
+
+// CRI contains information about the Container Runtimes.
+message CRI {
+ // The name of the CRI library
+ optional string name = 1;
+
+ // ContainerRuntimes is the list of the required container runtimes supported for a worker pool.
+ // +optional
+ repeated ContainerRuntime containerRuntimes = 2;
+}
+
+// CloudInfo contains information about the cloud
+message CloudInfo {
+ // Type is the cloud type
+ optional string type = 1;
+
+ // Region is the cloud region
+ optional string region = 2;
+}
+
+// CloudProfile represents certain properties about a provider environment.
+message CloudProfile {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the provider environment properties.
+ // +optional
+ optional CloudProfileSpec spec = 2;
+}
+
+// CloudProfileList is a collection of CloudProfiles.
+message CloudProfileList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of CloudProfiles.
+ repeated CloudProfile items = 2;
+}
+
+// CloudProfileSpec is the specification of a CloudProfile.
+// It must contain exactly one of its defined keys.
+message CloudProfileSpec {
+ // CABundle is a certificate bundle which will be installed onto every host machine of shoot cluster targeting this profile.
+ // +optional
+ optional string caBundle = 1;
+
+ // Kubernetes contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification.
+ optional KubernetesSettings kubernetes = 2;
+
+ // MachineImages contains constraints regarding allowed values for machine images in the Shoot specification.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated MachineImage machineImages = 3;
+
+ // MachineTypes contains constraints regarding allowed values for machine types in the 'workers' block in the Shoot specification.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated MachineType machineTypes = 4;
+
+ // ProviderConfig contains provider-specific configuration for the profile.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 5;
+
+ // Regions contains constraints regarding allowed values for regions and zones.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated Region regions = 6;
+
+ // SeedSelector contains an optional list of labels on `Seed` resources that marks those seeds whose shoots may use this provider profile.
+ // An empty list means that all seeds of the same provider type are supported.
+ // This is useful for environments that are of the same type (like openstack) but may have different "instances"/landscapes.
+ // Optionally a list of possible providers can be added to enable cross-provider scheduling. By default, the provider
+ // type of the seed must match the shoot's provider.
+ // +optional
+ optional SeedSelector seedSelector = 7;
+
+ // Type is the name of the provider.
+ optional string type = 8;
+
+ // VolumeTypes contains constraints regarding allowed values for volume types in the 'workers' block in the Shoot specification.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ repeated VolumeType volumeTypes = 9;
+}
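+
+// Illustrative sketch, not part of the generated API definitions: a fragment of a
+// CloudProfile spec using the fields above. Provider type, versions and region names
+// are hypothetical examples.
+//
+//   spec:
+//     type: aws
+//     kubernetes:
+//       versions:
+//       - version: 1.18.12
+//         classification: supported
+//       - version: 1.17.14
+//         classification: deprecated
+//         expirationDate: "2021-06-30T00:00:00Z"
+//     regions:
+//     - name: eu-central-1
+//       zones:
+//       - name: eu-central-1a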
+
+// ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler.
+message ClusterAutoscaler {
+ // ScaleDownDelayAfterAdd defines how long after scale up that scale down evaluation resumes (default: 1 hour).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scaleDownDelayAfterAdd = 1;
+
+ // ScaleDownDelayAfterDelete defines how long after node deletion scale down evaluation resumes (defaults to ScanInterval).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scaleDownDelayAfterDelete = 2;
+
+ // ScaleDownDelayAfterFailure defines how long after a scale down failure scale down evaluation resumes (default: 3 mins).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scaleDownDelayAfterFailure = 3;
+
+ // ScaleDownUnneededTime defines how long a node should be unneeded before it is eligible for scale down (default: 30 mins).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scaleDownUnneededTime = 4;
+
+ // ScaleDownUtilizationThreshold defines the threshold in % under which a node is being removed
+ // +optional
+ optional double scaleDownUtilizationThreshold = 5;
+
+ // ScanInterval defines how often the cluster is reevaluated for scale up or down (default: 10 secs).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scanInterval = 6;
+}
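+
+// Illustrative sketch, not part of the generated API definitions: the cluster autoscaler
+// flags above as they might appear in a Shoot manifest (values are hypothetical).
+//
+//   spec:
+//     kubernetes:
+//       clusterAutoscaler:
+//         scaleDownDelayAfterAdd: 1h
+//         scaleDownUnneededTime: 30m
+//         scaleDownUtilizationThreshold: 0.5
+//         scanInterval: 10s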
+
+// ClusterInfo contains information about the Plant cluster
+message ClusterInfo {
+ // Cloud describes the cloud information
+ optional CloudInfo cloud = 1;
+
+ // Kubernetes describes kubernetes meta information (e.g., version)
+ optional KubernetesInfo kubernetes = 2;
+}
+
+// Condition holds the information about the state of a resource.
+message Condition {
+ // Type of the Shoot condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // Last time the condition transitioned from one status to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // Last time the condition was updated.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4;
+
+ // The reason for the condition's last transition.
+ optional string reason = 5;
+
+ // A human readable message indicating details about the transition.
+ optional string message = 6;
+
+ // Well-defined error codes in case the condition reports a problem.
+ // +optional
+ repeated string codes = 7;
+}
+
+// ContainerRuntime contains information about worker's available container runtime
+message ContainerRuntime {
+ // Type is the type of the Container Runtime.
+ optional string type = 1;
+
+ // ProviderConfig is the configuration passed to container runtime resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+}
+
+// ControllerDeployment contains information for how this controller is deployed.
+message ControllerDeployment {
+ // Type is the deployment type.
+ optional string type = 1;
+
+ // ProviderConfig contains type-specific configuration.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // Policy controls how the controller is deployed. It defaults to 'OnDemand'.
+ // +optional
+ optional string policy = 3;
+
+ // SeedSelector contains an optional label selector for seeds. Only if the labels match then this controller will be
+ // considered for a deployment.
+ // An empty list means that all seeds are selected.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector seedSelector = 4;
+}
+
+// ControllerInstallation represents an installation request for an external controller.
+message ControllerInstallation {
+ // Standard object metadata.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec contains the specification of this installation.
+ optional ControllerInstallationSpec spec = 2;
+
+ // Status contains the status of this installation.
+ optional ControllerInstallationStatus status = 3;
+}
+
+// ControllerInstallationList is a collection of ControllerInstallations.
+message ControllerInstallationList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of ControllerInstallations.
+ repeated ControllerInstallation items = 2;
+}
+
+// ControllerInstallationSpec is the specification of a ControllerInstallation.
+message ControllerInstallationSpec {
+ // RegistrationRef is used to reference a ControllerRegistration resource.
+ optional k8s.io.api.core.v1.ObjectReference registrationRef = 1;
+
+ // SeedRef is used to reference a Seed resource.
+ optional k8s.io.api.core.v1.ObjectReference seedRef = 2;
+}
+
+// ControllerInstallationStatus is the status of a ControllerInstallation.
+message ControllerInstallationStatus {
+ // Conditions represents the latest available observations of a ControllerInstallation's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ repeated Condition conditions = 1;
+
+ // ProviderStatus contains type-specific status.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerStatus = 2;
+}
+
+// ControllerRegistration represents a registration of an external controller.
+message ControllerRegistration {
+ // Standard object metadata.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec contains the specification of this registration.
+ optional ControllerRegistrationSpec spec = 2;
+}
+
+// ControllerRegistrationList is a collection of ControllerRegistrations.
+message ControllerRegistrationList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of ControllerRegistrations.
+ repeated ControllerRegistration items = 2;
+}
+
+// ControllerRegistrationSpec is the specification of a ControllerRegistration.
+message ControllerRegistrationSpec {
+ // Resources is a list of combinations of kinds (DNSProvider, Infrastructure, Generic, ...) and their actual types
+ // (aws-route53, gcp, auditlog, ...).
+ // +optional
+ repeated ControllerResource resources = 1;
+
+ // Deployment contains information for how this controller is deployed.
+ // +optional
+ optional ControllerDeployment deployment = 2;
+}
+
+// ControllerResource is a combination of a kind (DNSProvider, Infrastructure, Generic, ...) and the actual type for this
+// kind (aws-route53, gcp, auditlog, ...).
+message ControllerResource {
+ // Kind is the resource kind, for example "OperatingSystemConfig".
+ optional string kind = 1;
+
+ // Type is the resource type, for example "coreos" or "ubuntu".
+ optional string type = 2;
+
+ // GloballyEnabled determines if this ControllerResource is required by all Shoot clusters.
+ // +optional
+ optional bool globallyEnabled = 3;
+
+ // ReconcileTimeout defines how long Gardener should wait for the resource reconciliation.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration reconcileTimeout = 4;
+
+ // Primary determines if the controller backed by this ControllerRegistration is responsible for the extension
+ // resource's lifecycle. This field defaults to true. There must be exactly one primary controller for this kind/type
+ // combination.
+ // +optional
+ optional bool primary = 5;
+}
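+
+// Illustrative sketch, not part of the generated API definitions: a minimal
+// ControllerRegistration combining the resources and deployment sections above.
+// The registration name and extension type are hypothetical.
+//
+//   apiVersion: core.gardener.cloud/v1beta1
+//   kind: ControllerRegistration
+//   metadata:
+//     name: extension-example
+//   spec:
+//     resources:
+//     - kind: Extension
+//       type: example
+//       globallyEnabled: false
+//     deployment:
+//       type: helm
+//       policy: OnDemand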
+
+// DNS holds information about the provider, the hosted zone id and the domain.
+message DNS {
+ // Domain is the external available domain of the Shoot cluster. This domain will be written into the
+ // kubeconfig that is handed out to end-users. Once set it is immutable.
+ // +optional
+ optional string domain = 1;
+
+ // Providers is a list of DNS providers that shall be enabled for this shoot cluster. Only relevant if
+ // a default domain is not used.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ repeated DNSProvider providers = 2;
+}
+
+message DNSIncludeExclude {
+ // Include is a list of resources that shall be included.
+ // +optional
+ repeated string include = 1;
+
+ // Exclude is a list of resources that shall be excluded.
+ // +optional
+ repeated string exclude = 2;
+}
+
+// DNSProvider contains information about a DNS provider.
+message DNSProvider {
+ // Domains contains information about which domains shall be included/excluded for this provider.
+ // +optional
+ optional DNSIncludeExclude domains = 1;
+
+ // Primary indicates that this DNSProvider is used for shoot related domains.
+ // +optional
+ optional bool primary = 2;
+
+ // SecretName is a name of a secret containing credentials for the stated domain and the
+ // provider. When not specified, the Gardener will use the cloud provider credentials referenced
+ // by the Shoot and try to find respective credentials there (primary provider only). Specifying this field may override
+ // this behavior, i.e. forcing the Gardener to only look into the given secret.
+ // +optional
+ optional string secretName = 3;
+
+ // Type is the DNS provider type.
+ // +optional
+ optional string type = 4;
+
+ // Zones contains information about which hosted zones shall be included/excluded for this provider.
+ // +optional
+ optional DNSIncludeExclude zones = 5;
+}
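+
+// Illustrative sketch, not part of the generated API definitions: a Shoot DNS section
+// using the provider fields above. Domain, provider type and secret name are hypothetical.
+//
+//   spec:
+//     dns:
+//       domain: my-shoot.example.com
+//       providers:
+//       - type: aws-route53
+//         primary: true
+//         secretName: my-dns-credentials
+//         domains:
+//           include:
+//           - example.com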
+
+// DataVolume contains information about a data volume.
+message DataVolume {
+ // Name of the volume to make it referenceable.
+ optional string name = 1;
+
+ // Type is the type of the volume.
+ // +optional
+ optional string type = 2;
+
+ // VolumeSize is the size of the volume.
+ optional string size = 3;
+
+ // Encrypted determines if the volume should be encrypted.
+ // +optional
+ optional bool encrypted = 4;
+}
+
+// Endpoint is an endpoint for monitoring, logging and other services around the plant.
+message Endpoint {
+ // Name is the name of the endpoint
+ optional string name = 1;
+
+ // URL is the url of the endpoint
+ optional string url = 2;
+
+ // Purpose is the purpose of the endpoint
+ optional string purpose = 3;
+}
+
+// ExpirableVersion contains a version and an expiration date.
+message ExpirableVersion {
+ // Version is the version identifier.
+ optional string version = 1;
+
+ // ExpirationDate defines the time at which this version expires.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationDate = 2;
+
+ // Classification defines the state of a version (preview, supported, deprecated)
+ // +optional
+ optional string classification = 3;
+}
+
+// Extension contains type and provider information for Shoot extensions.
+message Extension {
+ // Type is the type of the extension resource.
+ optional string type = 1;
+
+ // ProviderConfig is the configuration passed to extension resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // Disabled allows disabling extensions that were marked as 'globally enabled' by Gardener administrators.
+ // +optional
+ optional bool disabled = 3;
+}
+
+// Gardener holds the information about the Gardener version that operated a resource.
+message Gardener {
+ // ID is the Docker container id of the Gardener which last acted on a resource.
+ optional string id = 1;
+
+ // Name is the hostname (pod name) of the Gardener which last acted on a resource.
+ optional string name = 2;
+
+ // Version is the version of the Gardener which last acted on a resource.
+ optional string version = 3;
+}
+
+// Hibernation contains information whether the Shoot is suspended or not.
+message Hibernation {
+ // Enabled specifies whether the Shoot needs to be hibernated or not. If it is true, the Shoot's desired state is to be hibernated.
+ // If it is false or nil, the Shoot's desired state is to be awake.
+ // +optional
+ optional bool enabled = 1;
+
+ // Schedules determine the hibernation schedules.
+ // +optional
+ repeated HibernationSchedule schedules = 2;
+}
+
+// HibernationSchedule determines the hibernation schedule of a Shoot.
+// A Shoot will be regularly hibernated at each start time and will be woken up at each end time.
+// Start or End can be omitted, though at least one of each has to be specified.
+message HibernationSchedule {
+ // Start is a Cron spec at which time a Shoot will be hibernated.
+ // +optional
+ optional string start = 1;
+
+ // End is a Cron spec at which time a Shoot will be woken up.
+ // +optional
+ optional string end = 2;
+
+ // Location is the time location in which both start and end shall be evaluated.
+ // +optional
+ optional string location = 3;
+}
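+
+// Illustrative sketch, not part of the generated API definitions: a hibernation schedule
+// in a Shoot manifest using the cron fields above (times and location are hypothetical).
+//
+//   spec:
+//     hibernation:
+//       schedules:
+//       - start: "00 18 * * 1,2,3,4,5"   # hibernate at 18:00 on weekdays
+//         end: "00 07 * * 1,2,3,4,5"     # wake up at 07:00 on weekdays
+//         location: Europe/Berlin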
+
+// HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.
+// Note: Descriptions were taken from the Kubernetes documentation.
+message HorizontalPodAutoscalerConfig {
+ // The period after which a ready pod transition is considered to be the first.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration cpuInitializationPeriod = 1;
+
+ // The period since last downscale, before another downscale can be performed in horizontal pod autoscaler.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration downscaleDelay = 2;
+
+ // The configurable window at which the controller will choose the highest recommendation for autoscaling.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration downscaleStabilization = 3;
+
+ // The configurable period at which the horizontal pod autoscaler considers a Pod “not yet ready” given that it’s unready and it has transitioned to unready during that time.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration initialReadinessDelay = 4;
+
+ // The period for syncing the number of pods in horizontal pod autoscaler.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration syncPeriod = 5;
+
+ // The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.
+ // +optional
+ optional double tolerance = 6;
+
+ // The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration upscaleDelay = 7;
+}
+
+// Ingress configures the Ingress specific settings of the Seed cluster
+message Ingress {
+ // Domain specifies the IngressDomain of the Seed cluster pointing to the ingress controller endpoint. It will be used
+ // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable.
+ optional string domain = 1;
+
+ // Controller configures a Gardener managed Ingress Controller listening on the ingressDomain
+ optional IngressController controller = 2;
+}
+
+// IngressController enables a Gardener managed Ingress Controller listening on the ingressDomain
+message IngressController {
+ // Kind defines which kind of IngressController to use, for example `nginx`
+ optional string kind = 1;
+
+ // ProviderConfig specifies infrastructure specific configuration for the ingressController
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+}
+
+// KubeAPIServerConfig contains configuration settings for the kube-apiserver.
+message KubeAPIServerConfig {
+ optional KubernetesConfig kubernetesConfig = 1;
+
+ // AdmissionPlugins contains the list of user-defined admission plugins (additional to those managed by Gardener), and, if desired, the corresponding
+ // configuration.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ repeated AdmissionPlugin admissionPlugins = 2;
+
+ // APIAudiences are the identifiers of the API. The service account token authenticator will
+ // validate that tokens used against the API are bound to at least one of these audiences.
+ // Defaults to ["kubernetes"].
+ // +optional
+ repeated string apiAudiences = 3;
+
+ // AuditConfig contains configuration settings for the audit of the kube-apiserver.
+ // +optional
+ optional AuditConfig auditConfig = 4;
+
+ // EnableBasicAuthentication defines whether basic authentication should be enabled for this cluster or not.
+ // +optional
+ optional bool enableBasicAuthentication = 5;
+
+ // OIDCConfig contains configuration settings for the OIDC provider.
+ // +optional
+ optional OIDCConfig oidcConfig = 6;
+
+ // RuntimeConfig contains information about enabled or disabled APIs.
+ // +optional
+ map<string, bool> runtimeConfig = 7;
+
+ // ServiceAccountConfig contains configuration settings for the service account handling
+ // of the kube-apiserver.
+ // +optional
+ optional ServiceAccountConfig serviceAccountConfig = 8;
+
+ // WatchCacheSizes contains configuration of the API server's watch cache sizes.
+ // Configuring these flags might be useful for large-scale Shoot clusters with a lot of parallel update requests
+ // and a lot of watching controllers (e.g. large shooted Seed clusters). When the API server's watch cache's
+ // capacity is too small to cope with the amount of update requests and watchers for a particular resource, it
+ // might happen that controller watches are permanently stopped with `too old resource version` errors.
+ // Starting from kubernetes v1.19, the API server's watch cache size is adapted dynamically and setting the watch
+ // cache size flags will have no effect, except when setting it to 0 (which disables the watch cache).
+ // +optional
+ optional WatchCacheSizes watchCacheSizes = 9;
+
+ // Requests contains configuration for request-specific settings for the kube-apiserver.
+ // +optional
+ optional KubeAPIServerRequests requests = 10;
+}
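+
+// Illustrative sketch, not part of the generated API definitions: a kube-apiserver section
+// of a Shoot manifest exercising a few of the fields above. The API group/version used in
+// runtimeConfig and the request limits are hypothetical values.
+//
+//   spec:
+//     kubernetes:
+//       kubeAPIServer:
+//         enableBasicAuthentication: false
+//         runtimeConfig:
+//           scheduling.k8s.io/v1alpha1: true
+//         requests:
+//           maxNonMutatingInflight: 400
+//           maxMutatingInflight: 200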
+
+// KubeAPIServerRequests contains configuration for request-specific settings for the kube-apiserver.
+message KubeAPIServerRequests {
+ // MaxNonMutatingInflight is the maximum number of non-mutating requests in flight at a given time. When the server
+ // exceeds this, it rejects requests.
+ // +optional
+ optional int32 maxNonMutatingInflight = 1;
+
+ // MaxMutatingInflight is the maximum number of mutating requests in flight at a given time. When the server
+ // exceeds this, it rejects requests.
+ // +optional
+ optional int32 maxMutatingInflight = 2;
+}
+
+// KubeControllerManagerConfig contains configuration settings for the kube-controller-manager.
+message KubeControllerManagerConfig {
+ optional KubernetesConfig kubernetesConfig = 1;
+
+ // HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.
+ // +optional
+ optional HorizontalPodAutoscalerConfig horizontalPodAutoscaler = 2;
+
+ // NodeCIDRMaskSize defines the mask size for the node CIDR in the cluster (default is 24).
+ // +optional
+ optional int32 nodeCIDRMaskSize = 3;
+
+ // PodEvictionTimeout defines the grace period for deleting pods on failed nodes. Defaults to 2m.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration podEvictionTimeout = 4;
+}
+
+// KubeProxyConfig contains configuration settings for the kube-proxy.
+message KubeProxyConfig {
+ optional KubernetesConfig kubernetesConfig = 1;
+
+ // Mode specifies which proxy mode to use.
+ // defaults to IPTables.
+ // +optional
+ optional string mode = 2;
+}
+
+// KubeSchedulerConfig contains configuration settings for the kube-scheduler.
+message KubeSchedulerConfig {
+ optional KubernetesConfig kubernetesConfig = 1;
+
+ // KubeMaxPDVols allows configuring the `KUBE_MAX_PD_VOLS` environment variable for the kube-scheduler.
+ // Please find more information here: https://kubernetes.io/docs/concepts/storage/storage-limits/#custom-limits
+ // Note that using this field is considered alpha-/experimental-level and is at your own risk. You should be aware
+ // of all the side-effects and consequences when changing it.
+ // +optional
+ optional string kubeMaxPDVols = 2;
+}
+
+// KubeletConfig contains configuration settings for the kubelet.
+message KubeletConfig {
+ optional KubernetesConfig kubernetesConfig = 1;
+
+ // CPUCFSQuota allows you to disable/enable CPU throttling for Pods.
+ // +optional
+ optional bool cpuCFSQuota = 2;
+
+ // CPUManagerPolicy allows setting alternative CPU management policies (default: none).
+ // +optional
+ optional string cpuManagerPolicy = 3;
+
+ // EvictionHard describes a set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a Pod eviction.
+ // +optional
+ // Default:
+ // memory.available: "100Mi/1Gi/5%"
+ // nodefs.available: "5%"
+ // nodefs.inodesFree: "5%"
+ // imagefs.available: "5%"
+ // imagefs.inodesFree: "5%"
+ optional KubeletConfigEviction evictionHard = 4;
+
+ // EvictionMaxPodGracePeriod describes the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ // +optional
+ // Default: 90
+ optional int32 evictionMaxPodGracePeriod = 5;
+
+ // EvictionMinimumReclaim configures the amount of resources below the configured eviction threshold that the kubelet attempts to reclaim whenever the kubelet observes resource pressure.
+ // +optional
+ // Default: 0 for each resource
+ optional KubeletConfigEvictionMinimumReclaim evictionMinimumReclaim = 6;
+
+ // EvictionPressureTransitionPeriod is the duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.
+ // +optional
+ // Default: 4m0s
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration evictionPressureTransitionPeriod = 7;
+
+ // EvictionSoft describes a set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a Pod eviction.
+ // +optional
+ // Default:
+ // memory.available: "200Mi/1.5Gi/10%"
+ // nodefs.available: "10%"
+ // nodefs.inodesFree: "10%"
+ // imagefs.available: "10%"
+ // imagefs.inodesFree: "10%"
+ optional KubeletConfigEviction evictionSoft = 8;
+
+ // EvictionSoftGracePeriod describes a set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a Pod eviction.
+ // +optional
+ // Default:
+ // memory.available: 1m30s
+ // nodefs.available: 1m30s
+ // nodefs.inodesFree: 1m30s
+ // imagefs.available: 1m30s
+ // imagefs.inodesFree: 1m30s
+ optional KubeletConfigEvictionSoftGracePeriod evictionSoftGracePeriod = 9;
+
+ // MaxPods is the maximum number of Pods that are allowed by the Kubelet.
+ // +optional
+ // Default: 110
+ optional int32 maxPods = 10;
+
+ // PodPIDsLimit is the maximum number of process IDs per pod allowed by the kubelet.
+ // +optional
+ optional int64 podPidsLimit = 11;
+
+ // ImagePullProgressDeadline describes the time limit within which, if no pulling progress is made, image pulling will be cancelled.
+ // +optional
+ // Default: 1m
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration imagePullProgressDeadline = 12;
+
+ // FailSwapOn makes the Kubelet fail to start if swap is enabled on the node. (default true).
+ // +optional
+ optional bool failSwapOn = 13;
+
+ // KubeReserved is the configuration for resources reserved for kubernetes node components (mainly kubelet and container runtime).
+ // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied.
+ // +optional
+ // Default: cpu=80m,memory=1Gi,pid=20k
+ optional KubeletConfigReserved kubeReserved = 14;
+
+ // SystemReserved is the configuration for resources reserved for system processes not managed by kubernetes (e.g. journald).
+ // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied.
+ // +optional
+ optional KubeletConfigReserved systemReserved = 15;
+}
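+
+// Illustrative sketch, not part of the generated API definitions: a kubelet section of a
+// Shoot manifest using a few of the fields above; the values mirror the documented defaults
+// and are otherwise hypothetical.
+//
+//   spec:
+//     kubernetes:
+//       kubelet:
+//         maxPods: 110
+//         failSwapOn: true
+//         evictionHard:
+//           memoryAvailable: 100Mi
+//           nodeFSAvailable: 5%
+//         kubeReserved:
+//           cpu: 80m
+//           memory: 1Gi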
+
+// KubeletConfigEviction contains kubelet eviction thresholds supporting either a resource.Quantity or a percentage based value.
+message KubeletConfigEviction {
+ // MemoryAvailable is the threshold for the free memory on the host server.
+ // +optional
+ optional string memoryAvailable = 1;
+
+ // ImageFSAvailable is the threshold for the free disk space in the imagefs filesystem (docker images and container writable layers).
+ // +optional
+ optional string imageFSAvailable = 2;
+
+ // ImageFSInodesFree is the threshold for the available inodes in the imagefs filesystem.
+ // +optional
+ optional string imageFSInodesFree = 3;
+
+ // NodeFSAvailable is the threshold for the free disk space in the nodefs filesystem (docker volumes, logs, etc).
+ // +optional
+ optional string nodeFSAvailable = 4;
+
+ // NodeFSInodesFree is the threshold for the available inodes in the nodefs filesystem.
+ // +optional
+ optional string nodeFSInodesFree = 5;
+}
+
+// KubeletConfigEvictionMinimumReclaim contains configuration for the kubelet eviction minimum reclaim.
+message KubeletConfigEvictionMinimumReclaim {
+ // MemoryAvailable is the threshold for the memory reclaim on the host server.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity memoryAvailable = 1;
+
+ // ImageFSAvailable is the threshold for the disk space reclaim in the imagefs filesystem (docker images and container writable layers).
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity imageFSAvailable = 2;
+
+ // ImageFSInodesFree is the threshold for the inodes reclaim in the imagefs filesystem.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity imageFSInodesFree = 3;
+
+ // NodeFSAvailable is the threshold for the disk space reclaim in the nodefs filesystem (docker volumes, logs, etc).
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity nodeFSAvailable = 4;
+
+ // NodeFSInodesFree is the threshold for the inodes reclaim in the nodefs filesystem.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity nodeFSInodesFree = 5;
+}
+
+// KubeletConfigEvictionSoftGracePeriod contains grace periods for kubelet eviction thresholds.
+message KubeletConfigEvictionSoftGracePeriod {
+ // MemoryAvailable is the grace period for the MemoryAvailable eviction threshold.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration memoryAvailable = 1;
+
+ // ImageFSAvailable is the grace period for the ImageFSAvailable eviction threshold.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration imageFSAvailable = 2;
+
+ // ImageFSInodesFree is the grace period for the ImageFSInodesFree eviction threshold.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration imageFSInodesFree = 3;
+
+ // NodeFSAvailable is the grace period for the NodeFSAvailable eviction threshold.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration nodeFSAvailable = 4;
+
+ // NodeFSInodesFree is the grace period for the NodeFSInodesFree eviction threshold.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration nodeFSInodesFree = 5;
+}
+
+// KubeletConfigReserved contains reserved resources for daemons
+message KubeletConfigReserved {
+ // CPU is the reserved cpu.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity cpu = 1;
+
+ // Memory is the reserved memory.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity memory = 2;
+
+ // EphemeralStorage is the reserved ephemeral-storage.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity ephemeralStorage = 3;
+
+ // PID is the reserved process-ids.
+ // To reserve PID, the SupportNodePidsLimit feature gate must be enabled in Kubernetes versions < 1.15.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity pid = 4;
+}
+
+// Kubernetes contains the version and configuration variables for the Shoot control plane.
+message Kubernetes {
+ // AllowPrivilegedContainers indicates whether privileged containers are allowed in the Shoot (default: true).
+ // +optional
+ optional bool allowPrivilegedContainers = 1;
+
+ // ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler.
+ // +optional
+ optional ClusterAutoscaler clusterAutoscaler = 2;
+
+ // KubeAPIServer contains configuration settings for the kube-apiserver.
+ // +optional
+ optional KubeAPIServerConfig kubeAPIServer = 3;
+
+ // KubeControllerManager contains configuration settings for the kube-controller-manager.
+ // +optional
+ optional KubeControllerManagerConfig kubeControllerManager = 4;
+
+ // KubeScheduler contains configuration settings for the kube-scheduler.
+ // +optional
+ optional KubeSchedulerConfig kubeScheduler = 5;
+
+ // KubeProxy contains configuration settings for the kube-proxy.
+ // +optional
+ optional KubeProxyConfig kubeProxy = 6;
+
+ // Kubelet contains configuration settings for the kubelet.
+ // +optional
+ optional KubeletConfig kubelet = 7;
+
+ // Version is the semantic Kubernetes version to use for the Shoot cluster.
+ optional string version = 8;
+
+ // VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler.
+ // +optional
+ optional VerticalPodAutoscaler verticalPodAutoscaler = 9;
+}
+
+// KubernetesConfig contains common configuration fields for the control plane components.
+message KubernetesConfig {
+ // FeatureGates contains information about enabled feature gates.
+ // +optional
+ map<string, bool> featureGates = 1;
+}
+
+// KubernetesDashboard describes configuration values for the kubernetes-dashboard addon.
+message KubernetesDashboard {
+ optional Addon addon = 2;
+
+ // AuthenticationMode defines the authentication mode for the kubernetes-dashboard.
+ // +optional
+ optional string authenticationMode = 1;
+}
+
+// KubernetesInfo contains the version and configuration variables for the Plant cluster.
+message KubernetesInfo {
+ // Version is the semantic Kubernetes version to use for the Plant cluster.
+ optional string version = 1;
+}
+
+// KubernetesSettings contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification.
+message KubernetesSettings {
+ // Versions is the list of allowed Kubernetes versions with optional expiration dates for Shoot clusters.
+ // +patchMergeKey=version
+ // +patchStrategy=merge
+ // +optional
+ repeated ExpirableVersion versions = 1;
+}
+
+// LastError indicates the last occurred error for an operation on a resource.
+message LastError {
+ // A human readable message indicating details about the last error.
+ optional string description = 1;
+
+ // ID of the task which caused this last error
+ // +optional
+ optional string taskID = 2;
+
+ // Well-defined error codes of the last error(s).
+ // +optional
+ repeated string codes = 3;
+
+ // Last time the error was reported
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4;
+}
+
+// LastOperation indicates the type and the state of the last operation, along with a description
+// message and a progress indicator.
+message LastOperation {
+ // A human readable message indicating details about the last operation.
+ optional string description = 1;
+
+ // Last time the operation state transitioned from one to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 2;
+
+ // The progress in percentage (0-100) of the last operation.
+ optional int32 progress = 3;
+
+ // Status of the last operation, one of Aborted, Processing, Succeeded, Error, Failed.
+ optional string state = 4;
+
+ // Type of the last operation, one of Create, Reconcile, Delete.
+ optional string type = 5;
+}
+
+// Machine contains information about the machine type and image.
+message Machine {
+ // Type is the machine type of the worker group.
+ optional string type = 1;
+
+ // Image holds information about the machine image to use for all nodes of this pool. It will default to the
+ // latest version of the first image stated in the referenced CloudProfile if no value has been provided.
+ // +optional
+ optional ShootMachineImage image = 2;
+}
+
+// MachineControllerManagerSettings contains configurations for different worker pools, e.g. MachineDrainTimeout and MachineHealthTimeout.
+message MachineControllerManagerSettings {
+ // MachineDrainTimeout is the period after which a machine is forcefully deleted.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration machineDrainTimeout = 1;
+
+ // MachineHealthTimeout is the period after which a machine is declared failed.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration machineHealthTimeout = 2;
+
+ // MachineCreationTimeout is the period after which creation of the machine is declared failed.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration machineCreationTimeout = 3;
+
+ // MaxEvictRetries is the number of eviction retries on a pod after which drain is declared failed and forceful deletion is triggered.
+ // +optional
+ optional int32 maxEvictRetries = 4;
+
+ // NodeConditions is the set of node conditions which, if set to true for the period of MachineHealthTimeout, cause the machine to be declared failed.
+ // +optional
+ repeated string nodeConditions = 5;
+}
+
+// MachineImage defines the name and multiple versions of the machine image in any environment.
+message MachineImage {
+ // Name is the name of the image.
+ optional string name = 1;
+
+ // Versions contains versions, expiration dates and container runtimes of the machine image
+ // +patchMergeKey=version
+ // +patchStrategy=merge
+ repeated MachineImageVersion versions = 2;
+}
+
+// MachineImageVersion is an expirable version with a list of supported container runtimes and interfaces.
+message MachineImageVersion {
+ optional ExpirableVersion expirableVersion = 1;
+
+ // CRI is the list of container runtimes and interfaces supported by this version.
+ // +optional
+ repeated CRI cri = 2;
+}
+
+// MachineType contains certain properties of a machine type.
+message MachineType {
+ // CPU is the number of CPUs for this machine type.
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity cpu = 1;
+
+ // GPU is the number of GPUs for this machine type.
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity gpu = 2;
+
+ // Memory is the amount of memory for this machine type.
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity memory = 3;
+
+ // Name is the name of the machine type.
+ optional string name = 4;
+
+ // Storage is the amount of storage associated with the root volume of this machine type.
+ // +optional
+ optional MachineTypeStorage storage = 5;
+
+ // Usable defines if the machine type can be used for shoot clusters.
+ // +optional
+ optional bool usable = 6;
+}
+
+// MachineTypeStorage is the amount of storage associated with the root volume of this machine type.
+message MachineTypeStorage {
+ // Class is the class of the storage type.
+ optional string class = 1;
+
+ // StorageSize is the storage size.
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity size = 2;
+
+ // Type is the type of the storage.
+ optional string type = 3;
+}
+
+// Maintenance contains information about the time window for maintenance operations and which
+// operations should be performed.
+message Maintenance {
+ // AutoUpdate contains information about which constraints should be automatically updated.
+ // +optional
+ optional MaintenanceAutoUpdate autoUpdate = 1;
+
+ // TimeWindow contains information about the time window for maintenance operations.
+ // +optional
+ optional MaintenanceTimeWindow timeWindow = 2;
+
+ // ConfineSpecUpdateRollout prevents changes/updates to the shoot specification from being rolled out immediately.
+ // Instead, they are rolled out during the shoot's maintenance time window. There is one exception that triggers
+ // an immediate rollout: changes to the Spec.Hibernation.Enabled field.
+ // +optional
+ optional bool confineSpecUpdateRollout = 3;
+}
+
+// MaintenanceAutoUpdate contains information about which constraints should be automatically updated.
+message MaintenanceAutoUpdate {
+ // KubernetesVersion indicates whether the patch Kubernetes version may be automatically updated (default: true).
+ optional bool kubernetesVersion = 1;
+
+ // MachineImageVersion indicates whether the machine image version may be automatically updated (default: true).
+ optional bool machineImageVersion = 2;
+}
+
+// MaintenanceTimeWindow contains information about the time window for maintenance operations.
+message MaintenanceTimeWindow {
+ // Begin is the beginning of the time window in the format HHMMSS+ZONE, e.g. "220000+0100".
+ // If not present, a random value will be computed.
+ optional string begin = 1;
+
+ // End is the end of the time window in the format HHMMSS+ZONE, e.g. "220000+0100".
+ // If not present, the value will be computed based on the "Begin" value.
+ optional string end = 2;
+}
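+
+// Illustrative sketch, not part of the generated API definitions: a maintenance section of
+// a Shoot manifest combining the auto-update and time-window fields above (times are
+// hypothetical examples in the documented HHMMSS+ZONE format).
+//
+//   spec:
+//     maintenance:
+//       autoUpdate:
+//         kubernetesVersion: true
+//         machineImageVersion: true
+//       timeWindow:
+//         begin: 220000+0100
+//         end: 230000+0100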
+
+// Monitoring contains information about the monitoring configuration for the shoot.
+message Monitoring {
+ // Alerting contains information about the alerting configuration for the shoot cluster.
+ // +optional
+ optional Alerting alerting = 1;
+}
+
+// NamedResourceReference is a named reference to a resource.
+message NamedResourceReference {
+ // Name of the resource reference.
+ optional string name = 1;
+
+ // ResourceRef is a reference to a resource.
+ optional k8s.io.api.autoscaling.v1.CrossVersionObjectReference resourceRef = 2;
+}
+
+// Networking defines networking parameters for the shoot cluster.
+message Networking {
+ // Type identifies the type of the networking plugin.
+ optional string type = 1;
+
+ // ProviderConfig is the configuration passed to network resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // Pods is the CIDR of the pod network.
+ // +optional
+ optional string pods = 3;
+
+ // Nodes is the CIDR of the entire node network.
+ // +optional
+ optional string nodes = 4;
+
+ // Services is the CIDR of the service network.
+ // +optional
+ optional string services = 5;
+}
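+
+// Illustrative sketch, not part of the generated API definitions: a networking section of a
+// Shoot manifest using the fields above. Plugin type and CIDRs are hypothetical examples.
+//
+//   spec:
+//     networking:
+//       type: calico
+//       pods: 100.96.0.0/11
+//       nodes: 10.250.0.0/16
+//       services: 100.64.0.0/13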
+
+// NginxIngress describes configuration values for the nginx-ingress addon.
+message NginxIngress {
+ optional Addon addon = 1;
+
+ // LoadBalancerSourceRanges is a list of allowed IP sources for NginxIngress.
+ // +optional
+ repeated string loadBalancerSourceRanges = 2;
+
+ // Config contains custom configuration for the nginx-ingress-controller.
+ // See https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#configuration-options
+ // +optional
+ map<string, string> config = 3;
+
+ // ExternalTrafficPolicy controls the `.spec.externalTrafficPolicy` value of the load balancer `Service`
+ // exposing the nginx-ingress. Defaults to `Cluster`.
+ // +optional
+ optional string externalTrafficPolicy = 4;
+}
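+
+// Illustrative sketch, not part of the generated API definitions: an addons section of a
+// Shoot manifest using the dashboard and nginx-ingress fields above. It assumes the embedded
+// Addon message carries an `enabled` flag, as in the upstream Gardener API.
+//
+//   spec:
+//     addons:
+//       kubernetesDashboard:
+//         enabled: true
+//         authenticationMode: token
+//       nginxIngress:
+//         enabled: true
+//         externalTrafficPolicy: Cluster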
+
+// OIDCConfig contains configuration settings for the OIDC provider.
+// Note: Descriptions were taken from the Kubernetes documentation.
+message OIDCConfig {
+ // If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.
+ // +optional
+ optional string caBundle = 1;
+
+ // ClientAuthentication can optionally contain client configuration used for kubeconfig generation.
+ // +optional
+ optional OpenIDConnectClientAuthentication clientAuthentication = 2;
+
+ // The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.
+ // +optional
+ optional string clientID = 3;
+
+ // If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details.
+ // +optional
+ optional string groupsClaim = 4;
+
+ // If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.
+ // +optional
+ optional string groupsPrefix = 5;
+
+ // The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).
+ // +optional
+ optional string issuerURL = 6;
+
+ // ATTENTION: Only meaningful for Kubernetes >= 1.11
+ // key=value pairs that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value.
+ // +optional
+ map<string, string> requiredClaims = 7;
+
+ // List of allowed JOSE asymmetric signing algorithms. JWTs with an 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1
+ // +optional
+ repeated string signingAlgs = 8;
+
+ // The OpenID claim to use as the user name. Note that claims other than the default ('sub') are not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub")
+ // +optional
+ optional string usernameClaim = 9;
+
+ // If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'.
+ // +optional
+ optional string usernamePrefix = 10;
+}
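+
+// Illustrative sketch, not part of the generated API definitions: an OIDC section of a Shoot
+// manifest using the fields above. Issuer URL, client ID and claim names are hypothetical.
+//
+//   spec:
+//     kubernetes:
+//       kubeAPIServer:
+//         oidcConfig:
+//           issuerURL: https://identity.example.com
+//           clientID: my-shoot-cluster
+//           usernameClaim: email
+//           groupsClaim: groups
+//           signingAlgs:
+//           - RS256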
+
+// OpenIDConnectClientAuthentication contains configuration for OIDC clients.
+message OpenIDConnectClientAuthentication {
+ // Extra configuration added to kubeconfig's auth-provider.
+ // Must not be any of idp-issuer-url, client-id, client-secret, idp-certificate-authority, idp-certificate-authority-data, id-token or refresh-token
+ // +optional
+ map<string, string> extraConfig = 1;
+
+ // The client Secret for the OpenID Connect client.
+ // +optional
+ optional string secret = 2;
+}
+
+message Plant {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec contains the specification of this Plant.
+ optional PlantSpec spec = 2;
+
+ // Status contains the status of this Plant.
+ optional PlantStatus status = 3;
+}
+
+// PlantList is a collection of Plants.
+message PlantList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Plants.
+ repeated Plant items = 2;
+}
+
+// PlantSpec is the specification of a Plant.
+message PlantSpec {
+ // SecretRef is a reference to a Secret object containing the Kubeconfig of the external kubernetes
+ // clusters to be added to Gardener.
+ optional k8s.io.api.core.v1.LocalObjectReference secretRef = 1;
+
+ // Endpoints is the configuration of the plant endpoints.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ repeated Endpoint endpoints = 2;
+}
+
+// PlantStatus is the status of a Plant.
+message PlantStatus {
+ // Conditions represents the latest available observations of a Plant's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ repeated Condition conditions = 1;
+
+ // ObservedGeneration is the most recent generation observed for this Plant. It corresponds to the
+ // Plant's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 2;
+
+ // ClusterInfo is additional computed information about the newly added cluster (Plant)
+ optional ClusterInfo clusterInfo = 3;
+}
+
+// Project holds certain properties about a Gardener project.
+message Project {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the project properties.
+ // +optional
+ optional ProjectSpec spec = 2;
+
+ // Most recently observed status of the Project.
+ // +optional
+ optional ProjectStatus status = 3;
+}
+
+// ProjectList is a collection of Projects.
+message ProjectList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Projects.
+ repeated Project items = 2;
+}
+
+// ProjectMember is a member of a project.
+message ProjectMember {
+ // Subject represents a user name, an email address, or any other identifier of a user, group, or service
+ // account that has a certain role.
+ optional k8s.io.api.rbac.v1.Subject subject = 1;
+
+ // Role represents the role of this member.
+ // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `roles`
+ // list.
+ // TODO: Remove this field in favor of the `owner` role in `v1`.
+ optional string role = 2;
+
+ // Roles represents the list of roles of this member.
+ // +optional
+ repeated string roles = 3;
+}
+
+// ProjectSpec is the specification of a Project.
+message ProjectSpec {
+ // CreatedBy is a subject representing a user name, an email address, or any other identifier of a user
+ // who created the project.
+ // +optional
+ optional k8s.io.api.rbac.v1.Subject createdBy = 1;
+
+ // Description is a human-readable description of what the project is used for.
+ // +optional
+ optional string description = 2;
+
+ // Owner is a subject representing a user name, an email address, or any other identifier of a user owning
+ // the project.
+ // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `owner`
+ // role. The only way to change the owner will be by moving the `owner` role. In this API version the only way
+ // to change the owner is to use this field.
+ // +optional
+ // TODO: Remove this field in favor of the `owner` role in `v1`.
+ optional k8s.io.api.rbac.v1.Subject owner = 3;
+
+ // Purpose is a human-readable explanation of the project's purpose.
+ // +optional
+ optional string purpose = 4;
+
+ // Members is a list of subjects representing a user name, an email address, or any other identifier of a user,
+ // group, or service account that has a certain role.
+ // +optional
+ repeated ProjectMember members = 5;
+
+ // Namespace is the name of the namespace that has been created for the Project object.
+ // A nil value means that Gardener will determine the name of the namespace.
+ // +optional
+ optional string namespace = 6;
+
+ // Tolerations contains the tolerations for taints on seed clusters.
+ // +optional
+ optional ProjectTolerations tolerations = 7;
+}
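+
+// Illustrative sketch, not part of the generated API definitions: a minimal Project manifest
+// using the fields above. Names, namespace and owner are hypothetical examples.
+//
+//   apiVersion: core.gardener.cloud/v1beta1
+//   kind: Project
+//   metadata:
+//     name: dev
+//   spec:
+//     namespace: garden-dev
+//     description: Development project
+//     purpose: Experimenting with new features
+//     owner:
+//       apiGroup: rbac.authorization.k8s.io
+//       kind: User
+//       name: alice@example.com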
+
+// ProjectStatus holds the most recently observed status of the project.
+message ProjectStatus {
+ // ObservedGeneration is the most recent generation observed for this project.
+ // +optional
+ optional int64 observedGeneration = 1;
+
+ // Phase is the current phase of the project.
+ optional string phase = 2;
+
+ // StaleSinceTimestamp contains the timestamp when the project was first discovered to be stale/unused.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time staleSinceTimestamp = 3;
+
+ // StaleAutoDeleteTimestamp contains the timestamp when the project will be garbage-collected/automatically deleted
+ // because it's stale/unused.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time staleAutoDeleteTimestamp = 4;
+}
+
+// ProjectTolerations contains the tolerations for taints on seed clusters.
+message ProjectTolerations {
+ // Defaults contains a list of tolerations that are added to the shoots in this project by default.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ repeated Toleration defaults = 1;
+
+ // Whitelist contains a list of tolerations that are allowed to be added to the shoots in this project. Please note
+ // that this list may only be added by users having the `spec-tolerations-whitelist` verb for project resources.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ repeated Toleration whitelist = 2;
+}
+
+// Provider contains provider-specific information that is handed over to the provider-specific
+// extension controller.
+message Provider {
+ // Type is the type of the provider.
+ optional string type = 1;
+
+ // ControlPlaneConfig contains the provider-specific control plane config blob. Please look up the concrete
+ // definition in the documentation of your provider extension.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension controlPlaneConfig = 2;
+
+ // InfrastructureConfig contains the provider-specific infrastructure config blob. Please look up the concrete
+ // definition in the documentation of your provider extension.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension infrastructureConfig = 3;
+
+ // Workers is a list of worker groups.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated Worker workers = 4;
+}
+
+message Quota {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the Quota constraints.
+ // +optional
+ optional QuotaSpec spec = 2;
+}
+
+// QuotaList is a collection of Quotas.
+message QuotaList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Quotas.
+ repeated Quota items = 2;
+}
+
+// QuotaSpec is the specification of a Quota.
+message QuotaSpec {
+ // ClusterLifetimeDays is the lifetime of a Shoot cluster in days before it will be terminated automatically.
+ // +optional
+ optional int32 clusterLifetimeDays = 1;
+
+ // Metrics is a list of resources which will be put under constraints.
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> metrics = 2;
+
+ // Scope is the scope of the Quota object, either 'project' or 'secret'.
+ optional k8s.io.api.core.v1.ObjectReference scope = 3;
+}
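+
+// Illustrative sketch, not part of the generated API definitions: a Quota manifest using the
+// fields above, scoped to a cloud provider Secret. Name and metric values are hypothetical.
+//
+//   apiVersion: core.gardener.cloud/v1beta1
+//   kind: Quota
+//   metadata:
+//     name: trial-quota
+//   spec:
+//     clusterLifetimeDays: 14
+//     scope:
+//       apiVersion: v1
+//       kind: Secret
+//     metrics:
+//       cpu: "200"
+//       memory: 4000Gi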
+
+// Region contains certain properties of a region.
+message Region {
+ // Name is a region name.
+ optional string name = 1;
+
+ // Zones is a list of availability zones in this region.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ repeated AvailabilityZone zones = 2;
+
+ // Labels is an optional set of key-value pairs that contain certain administrator-controlled labels for this region.
+ // It can be used by Gardener administrators/operators to provide additional information about a region, e.g. wrt
+ // quality, reliability, access restrictions, etc.
+ // +optional
+ map<string, string> labels = 3;
+}
+
+// ResourceWatchCacheSize contains configuration of the API server's watch cache size for one specific resource.
+message ResourceWatchCacheSize {
+ // APIGroup is the API group of the resource for which the watch cache size should be configured.
+ // An unset value is used to specify the legacy core API (e.g. for `secrets`).
+ // +optional
+ optional string apiGroup = 1;
+
+ // Resource is the name of the resource for which the watch cache size should be configured
+ // (in lowercase plural form, e.g. `secrets`).
+ optional string resource = 2;
+
+ // CacheSize specifies the watch cache size that should be configured for the specified resource.
+ optional int32 size = 3;
+}
+
+message SecretBinding {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // SecretRef is a reference to a secret object in the same or another namespace.
+ optional k8s.io.api.core.v1.SecretReference secretRef = 2;
+
+ // Quotas is a list of references to Quota objects in the same or another namespace.
+ // +optional
+ repeated k8s.io.api.core.v1.ObjectReference quotas = 3;
+}
+
+// SecretBindingList is a collection of SecretBindings.
+message SecretBindingList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of SecretBindings.
+ repeated SecretBinding items = 2;
+}
+
+// Seed represents a seed cluster that is registered with Gardener and hosts the control planes of Shoot clusters.
+message Seed {
+  // Standard object metadata.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec contains the specification of this seed.
+  optional SeedSpec spec = 2;
+
+  // Status contains the status of this seed.
+  optional SeedStatus status = 3;
+}
+
+// SeedBackup contains the object store configuration for backups for shoot (currently only etcd).
+message SeedBackup {
+ // Provider is a provider name.
+ optional string provider = 1;
+
+ // ProviderConfig is the configuration passed to BackupBucket resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // Region is a region name.
+ // +optional
+ optional string region = 3;
+
+ // SecretRef is a reference to a Secret object containing the cloud provider credentials for
+ // the object store where backups should be stored. It should have enough privileges to manipulate
+ // the objects as well as buckets.
+ optional k8s.io.api.core.v1.SecretReference secretRef = 4;
+}
+
+// SeedDNS contains DNS-relevant information about this seed cluster.
+message SeedDNS {
+ // IngressDomain is the domain of the Seed cluster pointing to the ingress controller endpoint. It will be used
+ // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable.
+ // This will be removed in the next API version and replaced by spec.ingress.domain.
+ // +optional
+ optional string ingressDomain = 1;
+
+ // Provider configures a DNSProvider
+ // +optional
+ optional SeedDNSProvider provider = 2;
+}
+
+// SeedDNSProvider configures a DNSProvider for Seeds
+message SeedDNSProvider {
+ // Type describes the type of the dns-provider, for example `aws-route53`
+ optional string type = 1;
+
+ // SecretRef is a reference to a Secret object containing cloud provider credentials used for registering external domains.
+ optional k8s.io.api.core.v1.SecretReference secretRef = 2;
+
+ // Domains contains information about which domains shall be included/excluded for this provider.
+ // +optional
+ optional DNSIncludeExclude domains = 3;
+
+ // Zones contains information about which hosted zones shall be included/excluded for this provider.
+ // +optional
+ optional DNSIncludeExclude zones = 4;
+}
+
+// SeedList is a collection of Seeds.
+message SeedList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Seeds.
+ repeated Seed items = 2;
+}
+
+// SeedNetworks contains CIDRs for the pod, service and node networks of a Kubernetes cluster.
+message SeedNetworks {
+ // Nodes is the CIDR of the node network.
+ // +optional
+ optional string nodes = 1;
+
+ // Pods is the CIDR of the pod network.
+ optional string pods = 2;
+
+ // Services is the CIDR of the service network.
+ optional string services = 3;
+
+ // ShootDefaults contains the default networks CIDRs for shoots.
+ // +optional
+ optional ShootNetworks shootDefaults = 4;
+
+ // BlockCIDRs is a list of network addresses that should be blocked for shoot control plane components running
+ // in the seed cluster.
+ // +optional
+ repeated string blockCIDRs = 5;
+}
+
+// SeedProvider defines the provider type and region for this Seed cluster.
+message SeedProvider {
+ // Type is the name of the provider.
+ optional string type = 1;
+
+ // ProviderConfig is the configuration passed to Seed resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // Region is a name of a region.
+ optional string region = 3;
+}
+
+// SeedSelector contains constraints for selecting seed to be usable for shoots using a profile
+message SeedSelector {
+ // LabelSelector is optional and can be used to select seeds by their label settings
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1;
+
+  // Providers is optional and can be used to restrict seeds by their provider type. '*' can be used to enable seeds regardless of their provider type.
+ // +optional
+ repeated string providerTypes = 2;
+}
+
+// SeedSettingExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the
+// seed. When enabled then this is done via PodPriority and requires the Seed cluster to have Kubernetes version 1.11
+// or the PodPriority feature gate as well as the scheduling.k8s.io/v1alpha1 API group enabled.
+message SeedSettingExcessCapacityReservation {
+ // Enabled controls whether the excess capacity reservation should be enabled.
+ optional bool enabled = 1;
+}
+
+// SeedSettingLoadBalancerServices controls certain settings for services of type load balancer that are created in the
+// seed.
+message SeedSettingLoadBalancerServices {
+ // Annotations is a map of annotations that will be injected/merged into every load balancer service object.
+ // +optional
+  map<string, string> annotations = 1;
+}
+
+// SeedSettingScheduling controls settings for scheduling decisions for the seed.
+message SeedSettingScheduling {
+ // Visible controls whether the gardener-scheduler shall consider this seed when scheduling shoots. Invisible seeds
+ // are not considered by the scheduler.
+ optional bool visible = 1;
+}
+
+// SeedSettingShootDNS controls the shoot DNS settings for the seed.
+message SeedSettingShootDNS {
+  // Enabled controls whether DNS for shoot clusters should be enabled. When disabled, shoots using this
+  // seed won't get any DNS providers or DNS records, and no DNS extension controller needs to be installed here.
+  // This is useful for environments where DNS is not required.
+ optional bool enabled = 1;
+}
+
+// SeedSettingVerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the
+// seed.
+message SeedSettingVerticalPodAutoscaler {
+ // Enabled controls whether the VPA components shall be deployed into the garden namespace in the seed cluster. It
+ // is enabled by default because Gardener heavily relies on a VPA being deployed. You should only disable this if
+ // your seed cluster already has another, manually/custom managed VPA deployment.
+ optional bool enabled = 1;
+}
+
+// SeedSettings contains certain settings for this seed cluster.
+message SeedSettings {
+ // ExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the seed.
+ // +optional
+ optional SeedSettingExcessCapacityReservation excessCapacityReservation = 1;
+
+ // Scheduling controls settings for scheduling decisions for the seed.
+ // +optional
+ optional SeedSettingScheduling scheduling = 2;
+
+ // ShootDNS controls the shoot DNS settings for the seed.
+ // +optional
+ optional SeedSettingShootDNS shootDNS = 3;
+
+ // LoadBalancerServices controls certain settings for services of type load balancer that are created in the
+ // seed.
+ // +optional
+ optional SeedSettingLoadBalancerServices loadBalancerServices = 4;
+
+ // VerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the seed.
+ // +optional
+ optional SeedSettingVerticalPodAutoscaler verticalPodAutoscaler = 5;
+}
+
+// SeedSpec is the specification of a Seed.
+message SeedSpec {
+ // Backup holds the object store configuration for the backups of shoot (currently only etcd).
+ // If it is not specified, then there won't be any backups taken for shoots associated with this seed.
+ // If backup field is present in seed, then backups of the etcd from shoot control plane will be stored
+ // under the configured object store.
+ // +optional
+ optional SeedBackup backup = 1;
+
+ // DNS contains DNS-relevant information about this seed cluster.
+ optional SeedDNS dns = 2;
+
+ // Networks defines the pod, service and worker network of the Seed cluster.
+ optional SeedNetworks networks = 3;
+
+ // Provider defines the provider type and region for this Seed cluster.
+ optional SeedProvider provider = 4;
+
+ // SecretRef is a reference to a Secret object containing the Kubeconfig and the cloud provider credentials for
+ // the account the Seed cluster has been deployed to.
+ // +optional
+ optional k8s.io.api.core.v1.SecretReference secretRef = 5;
+
+ // Taints describes taints on the seed.
+ // +optional
+ repeated SeedTaint taints = 6;
+
+ // Volume contains settings for persistentvolumes created in the seed cluster.
+ // +optional
+ optional SeedVolume volume = 7;
+
+ // Settings contains certain settings for this seed cluster.
+ // +optional
+ optional SeedSettings settings = 8;
+
+ // Ingress configures Ingress specific settings of the Seed cluster.
+ // +optional
+ optional Ingress ingress = 9;
+}
+
+// SeedStatus is the status of a Seed.
+message SeedStatus {
+ // Gardener holds information about the Gardener which last acted on the Shoot.
+ // +optional
+ optional Gardener gardener = 1;
+
+ // KubernetesVersion is the Kubernetes version of the seed cluster.
+ // +optional
+ optional string kubernetesVersion = 2;
+
+ // Conditions represents the latest available observations of a Seed's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ repeated Condition conditions = 3;
+
+ // ObservedGeneration is the most recent generation observed for this Seed. It corresponds to the
+ // Seed's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 4;
+
+ // ClusterIdentity is the identity of the Seed cluster
+ // +optional
+ optional string clusterIdentity = 5;
+
+ // Capacity represents the total resources of a seed.
+ // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 6;
+
+ // Allocatable represents the resources of a seed that are available for scheduling.
+ // Defaults to Capacity.
+ // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatable = 7;
+}
+
+// SeedTaint describes a taint on a seed.
+message SeedTaint {
+ // Key is the taint key to be applied to a seed.
+ optional string key = 1;
+
+ // Value is the taint value corresponding to the taint key.
+ // +optional
+ optional string value = 2;
+}
+
+// SeedVolume contains settings for persistentvolumes created in the seed cluster.
+message SeedVolume {
+ // MinimumSize defines the minimum size that should be used for PVCs in the seed.
+ // +optional
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity minimumSize = 1;
+
+ // Providers is a list of storage class provisioner types for the seed.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ repeated SeedVolumeProvider providers = 2;
+}
+
+// SeedVolumeProvider is a storage class provisioner type.
+message SeedVolumeProvider {
+ // Purpose is the purpose of this provider.
+ optional string purpose = 1;
+
+ // Name is the name of the storage class provisioner type.
+ optional string name = 2;
+}
+
+// ServiceAccountConfig is the kube-apiserver configuration for service accounts.
+message ServiceAccountConfig {
+ // Issuer is the identifier of the service account token issuer. The issuer will assert this
+ // identifier in "iss" claim of issued tokens. This value is a string or URI.
+ // Defaults to URI of the API server.
+ // +optional
+ optional string issuer = 1;
+
+ // SigningKeySecret is a reference to a secret that contains an optional private key of the
+ // service account token issuer. The issuer will sign issued ID tokens with this private key.
+ // Only useful if service account tokens are also issued by another external system.
+ // +optional
+ optional k8s.io.api.core.v1.LocalObjectReference signingKeySecretName = 2;
+}
+
+message Shoot {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the Shoot cluster.
+ // +optional
+ optional ShootSpec spec = 2;
+
+ // Most recently observed status of the Shoot cluster.
+ // +optional
+ optional ShootStatus status = 3;
+}
+
+// ShootList is a list of Shoot objects.
+message ShootList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Shoots.
+ repeated Shoot items = 2;
+}
+
+// ShootMachineImage defines the name and the version of the shoot's machine image in any environment. Has to be
+// defined in the respective CloudProfile.
+message ShootMachineImage {
+ // Name is the name of the image.
+ optional string name = 1;
+
+ // ProviderConfig is the shoot's individual configuration passed to an extension resource.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2;
+
+ // Version is the version of the shoot's image.
+ // If version is not provided, it will be defaulted to the latest version from the CloudProfile.
+ // +optional
+ optional string version = 3;
+}
+
+// ShootNetworks contains the default networks CIDRs for shoots.
+message ShootNetworks {
+ // Pods is the CIDR of the pod network.
+ // +optional
+ optional string pods = 1;
+
+ // Services is the CIDR of the service network.
+ // +optional
+ optional string services = 2;
+}
+
+// ShootSpec is the specification of a Shoot.
+message ShootSpec {
+ // Addons contains information about enabled/disabled addons and their configuration.
+ // +optional
+ optional Addons addons = 1;
+
+ // CloudProfileName is a name of a CloudProfile object.
+ optional string cloudProfileName = 2;
+
+ // DNS contains information about the DNS settings of the Shoot.
+ // +optional
+ optional DNS dns = 3;
+
+ // Extensions contain type and provider information for Shoot extensions.
+ // +optional
+ repeated Extension extensions = 4;
+
+ // Hibernation contains information whether the Shoot is suspended or not.
+ // +optional
+ optional Hibernation hibernation = 5;
+
+ // Kubernetes contains the version and configuration settings of the control plane components.
+ optional Kubernetes kubernetes = 6;
+
+ // Networking contains information about cluster networking such as CNI Plugin type, CIDRs, ...etc.
+ optional Networking networking = 7;
+
+ // Maintenance contains information about the time window for maintenance operations and which
+ // operations should be performed.
+ // +optional
+ optional Maintenance maintenance = 8;
+
+ // Monitoring contains information about custom monitoring configurations for the shoot.
+ // +optional
+ optional Monitoring monitoring = 9;
+
+ // Provider contains all provider-specific and provider-relevant information.
+ optional Provider provider = 10;
+
+ // Purpose is the purpose class for this cluster.
+ // +optional
+ optional string purpose = 11;
+
+ // Region is a name of a region.
+ optional string region = 12;
+
+  // SecretBindingName is the name of a SecretBinding that has a reference to the provider secret.
+ // The credentials inside the provider secret will be used to create the shoot in the respective account.
+ optional string secretBindingName = 13;
+
+ // SeedName is the name of the seed cluster that runs the control plane of the Shoot.
+ // +optional
+ optional string seedName = 14;
+
+ // SeedSelector is an optional selector which must match a seed's labels for the shoot to be scheduled on that seed.
+ // +optional
+ optional SeedSelector seedSelector = 15;
+
+ // Resources holds a list of named resource references that can be referred to in extension configs by their names.
+ // +optional
+ repeated NamedResourceReference resources = 16;
+
+ // Tolerations contains the tolerations for taints on seed clusters.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ repeated Toleration tolerations = 17;
+}
+
+// ShootStatus holds the most recently observed status of the Shoot cluster.
+message ShootStatus {
+  // Conditions represents the latest available observations of a Shoot's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ repeated Condition conditions = 1;
+
+  // Constraints represents conditions of a Shoot's current state that constrain some operations on it.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ repeated Condition constraints = 2;
+
+ // Gardener holds information about the Gardener which last acted on the Shoot.
+ optional Gardener gardener = 3;
+
+ // IsHibernated indicates whether the Shoot is currently hibernated.
+ optional bool hibernated = 4;
+
+ // LastOperation holds information about the last operation on the Shoot.
+ // +optional
+ optional LastOperation lastOperation = 5;
+
+ // LastErrors holds information about the last occurred error(s) during an operation.
+ // +optional
+ repeated LastError lastErrors = 6;
+
+ // ObservedGeneration is the most recent generation observed for this Shoot. It corresponds to the
+ // Shoot's generation, which is updated on mutation by the API Server.
+ // +optional
+ optional int64 observedGeneration = 7;
+
+ // RetryCycleStartTime is the start time of the last retry cycle (used to determine how often an operation
+ // must be retried until we give up).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time retryCycleStartTime = 8;
+
+ // SeedName is the name of the seed cluster that runs the control plane of the Shoot. This value is only written
+ // after a successful create/reconcile operation. It will be used when control planes are moved between Seeds.
+ // +optional
+ optional string seedName = 9;
+
+ // TechnicalID is the name that is used for creating the Seed namespace, the infrastructure resources, and
+ // basically everything that is related to this particular Shoot.
+ optional string technicalID = 10;
+
+ // UID is a unique identifier for the Shoot cluster to avoid portability between Kubernetes clusters.
+ // It is used to compute unique hashes.
+ optional string uid = 11;
+
+ // ClusterIdentity is the identity of the Shoot cluster
+ // +optional
+ optional string clusterIdentity = 12;
+}
+
+// Toleration is a toleration for a seed taint.
+message Toleration {
+ // Key is the toleration key to be applied to a project or shoot.
+ optional string key = 1;
+
+ // Value is the toleration value corresponding to the toleration key.
+ // +optional
+ optional string value = 2;
+}
+
+// VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler.
+message VerticalPodAutoscaler {
+ // Enabled specifies whether the Kubernetes VPA shall be enabled for the shoot cluster.
+ optional bool enabled = 1;
+
+ // EvictAfterOOMThreshold defines the threshold that will lead to pod eviction in case it OOMed in less than the given
+ // threshold since its start and if it has only one container (default: 10m0s).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration evictAfterOOMThreshold = 2;
+
+ // EvictionRateBurst defines the burst of pods that can be evicted (default: 1)
+ // +optional
+ optional int32 evictionRateBurst = 3;
+
+ // EvictionRateLimit defines the number of pods that can be evicted per second. A rate limit set to 0 or -1 will
+ // disable the rate limiter (default: -1).
+ // +optional
+ optional double evictionRateLimit = 4;
+
+ // EvictionTolerance defines the fraction of replica count that can be evicted for update in case more than one
+ // pod can be evicted (default: 0.5).
+ // +optional
+ optional double evictionTolerance = 5;
+
+ // RecommendationMarginFraction is the fraction of usage added as the safety margin to the recommended request
+ // (default: 0.15).
+ // +optional
+ optional double recommendationMarginFraction = 6;
+
+ // UpdaterInterval is the interval how often the updater should run (default: 1m0s).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration updaterInterval = 7;
+
+ // RecommenderInterval is the interval how often metrics should be fetched (default: 1m0s).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration recommenderInterval = 8;
+}
+
+// Volume contains information about the volume type, size, and encryption.
+message Volume {
+  // Name of the volume to make it referenceable.
+ // +optional
+ optional string name = 1;
+
+ // Type is the type of the volume.
+ // +optional
+ optional string type = 2;
+
+ // VolumeSize is the size of the volume.
+ optional string size = 3;
+
+ // Encrypted determines if the volume should be encrypted.
+ // +optional
+ optional bool encrypted = 4;
+}
+
+// VolumeType contains certain properties of a volume type.
+message VolumeType {
+ // Class is the class of the volume type.
+ optional string class = 1;
+
+ // Name is the name of the volume type.
+ optional string name = 2;
+
+ // Usable defines if the volume type can be used for shoot clusters.
+ // +optional
+ optional bool usable = 3;
+}
+
+// WatchCacheSizes contains configuration of the API server's watch cache sizes.
+message WatchCacheSizes {
+ // Default configures the default watch cache size of the kube-apiserver
+ // (flag `--default-watch-cache-size`, defaults to 100).
+ // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+ // +optional
+ optional int32 default = 1;
+
+ // Resources configures the watch cache size of the kube-apiserver per resource
+ // (flag `--watch-cache-sizes`).
+ // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+ // +optional
+ repeated ResourceWatchCacheSize resources = 2;
+}
+
+// Worker is the base definition of a worker group.
+message Worker {
+ // Annotations is a map of key/value pairs for annotations for all the `Node` objects in this worker pool.
+ // +optional
+  map<string, string> annotations = 1;
+
+ // CABundle is a certificate bundle which will be installed onto every machine of this worker pool.
+ // +optional
+ optional string caBundle = 2;
+
+ // CRI contains configurations of CRI support of every machine in the worker pool
+ // +optional
+ optional CRI cri = 3;
+
+ // Kubernetes contains configuration for Kubernetes components related to this worker pool.
+ // +optional
+ optional WorkerKubernetes kubernetes = 4;
+
+ // Labels is a map of key/value pairs for labels for all the `Node` objects in this worker pool.
+ // +optional
+  map<string, string> labels = 5;
+
+ // Name is the name of the worker group.
+ optional string name = 6;
+
+ // Machine contains information about the machine type and image.
+ optional Machine machine = 7;
+
+ // Maximum is the maximum number of VMs to create.
+ optional int32 maximum = 8;
+
+ // Minimum is the minimum number of VMs to create.
+ optional int32 minimum = 9;
+
+  // MaxSurge is the maximum number of VMs that are created during an update.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 10;
+
+ // MaxUnavailable is the maximum number of VMs that can be unavailable during an update.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 11;
+
+ // ProviderConfig is the provider-specific configuration for this worker pool.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 12;
+
+ // Taints is a list of taints for all the `Node` objects in this worker pool.
+ // +optional
+ repeated k8s.io.api.core.v1.Taint taints = 13;
+
+ // Volume contains information about the volume type and size.
+ // +optional
+ optional Volume volume = 14;
+
+ // DataVolumes contains a list of additional worker volumes.
+ // +optional
+ repeated DataVolume dataVolumes = 15;
+
+ // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state.
+ // +optional
+ optional string kubeletDataVolumeName = 16;
+
+ // Zones is a list of availability zones that are used to evenly distribute this worker pool. Optional
+ // as not every provider may support availability zones.
+ // +optional
+ repeated string zones = 17;
+
+ // SystemComponents contains configuration for system components related to this worker pool
+ // +optional
+ optional WorkerSystemComponents systemComponents = 18;
+
+ // MachineControllerManagerSettings contains configurations for different worker-pools. Eg. MachineDrainTimeout, MachineHealthTimeout.
+ // +optional
+ optional MachineControllerManagerSettings machineControllerManager = 19;
+}
+
+// WorkerKubernetes contains configuration for Kubernetes components related to this worker pool.
+message WorkerKubernetes {
+ // Kubelet contains configuration settings for all kubelets of this worker pool.
+ // +optional
+ optional KubeletConfig kubelet = 1;
+}
+
+// WorkerSystemComponents contains configuration for system components related to this worker pool
+message WorkerSystemComponents {
+ // Allow determines whether the pool should be allowed to host system components or not (defaults to true)
+ optional bool allow = 1;
+}
+
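The messages above mirror the Go types in github.com/gardener/gardener/pkg/apis/core/v1beta1. As an illustrative sketch only (editor's addition, not part of this patch; the package name and all concrete values are invented), a minimal SeedSpec assembled from the fields defined above might look like this:

package example

import (
	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	"k8s.io/utils/pointer"
)

// exampleSeedSpec builds a minimal SeedSpec from the fields shown above.
// All values are placeholders.
func exampleSeedSpec() gardencorev1beta1.SeedSpec {
	return gardencorev1beta1.SeedSpec{
		// Provider type and region of the underlying infrastructure.
		Provider: gardencorev1beta1.SeedProvider{
			Type:   "example-provider",
			Region: "example-region-1",
		},
		// Ingress domain used to construct ingress URLs for shoots hosted on this seed.
		DNS: gardencorev1beta1.SeedDNS{
			IngressDomain: pointer.StringPtr("ingress.example-seed.example.org"),
		},
		// Pod, service and (optional) node CIDRs of the seed cluster itself.
		Networks: gardencorev1beta1.SeedNetworks{
			Pods:     "100.96.0.0/11",
			Services: "100.64.0.0/13",
			Nodes:    pointer.StringPtr("10.250.0.0/16"),
		},
		// Make the seed visible to the scheduler and keep shoot DNS enabled.
		Settings: &gardencorev1beta1.SeedSettings{
			Scheduling: &gardencorev1beta1.SeedSettingScheduling{Visible: true},
			ShootDNS:   &gardencorev1beta1.SeedSettingShootDNS{Enabled: true},
		},
	}
}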
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition_builder.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition_builder.go
new file mode 100644
index 0000000..c539b18
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition_builder.go
@@ -0,0 +1,155 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helper
+
+import (
+ "fmt"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ConditionBuilder builds a Condition.
+type ConditionBuilder interface {
+ WithOldCondition(old gardencorev1beta1.Condition) ConditionBuilder
+ WithStatus(status gardencorev1beta1.ConditionStatus) ConditionBuilder
+ WithReason(reason string) ConditionBuilder
+ WithMessage(message string) ConditionBuilder
+ WithCodes(codes ...gardencorev1beta1.ErrorCode) ConditionBuilder
+ WithNowFunc(now func() metav1.Time) ConditionBuilder
+ Build() (new gardencorev1beta1.Condition, updated bool)
+}
+
+// defaultConditionBuilder builds a Condition.
+type defaultConditionBuilder struct {
+ old gardencorev1beta1.Condition
+ status gardencorev1beta1.ConditionStatus
+ conditionType gardencorev1beta1.ConditionType
+ reason string
+ message string
+ codes []gardencorev1beta1.ErrorCode
+ nowFunc func() metav1.Time
+}
+
+// NewConditionBuilder returns a ConditionBuilder for a specific condition.
+func NewConditionBuilder(conditionType gardencorev1beta1.ConditionType) (ConditionBuilder, error) {
+ if conditionType == "" {
+ return nil, fmt.Errorf("conditionType cannot be empty")
+ }
+
+ return &defaultConditionBuilder{
+ conditionType: conditionType,
+ nowFunc: metav1.Now,
+ }, nil
+}
+
+// WithOldCondition sets the old condition. It can be used to provide default values.
+// The old condition's type is overridden to the one specified in the builder.
+func (b *defaultConditionBuilder) WithOldCondition(old gardencorev1beta1.Condition) ConditionBuilder {
+ old.Type = b.conditionType
+ b.old = old
+
+ return b
+}
+
+// WithStatus sets the status of the condition.
+func (b *defaultConditionBuilder) WithStatus(status gardencorev1beta1.ConditionStatus) ConditionBuilder {
+ b.status = status
+ return b
+}
+
+// WithReason sets the reason of the condition.
+func (b *defaultConditionBuilder) WithReason(reason string) ConditionBuilder {
+ b.reason = reason
+ return b
+}
+
+// WithMessage sets the message of the condition.
+func (b *defaultConditionBuilder) WithMessage(message string) ConditionBuilder {
+ b.message = message
+ return b
+}
+
+// WithCodes sets the codes of the condition.
+func (b *defaultConditionBuilder) WithCodes(codes ...gardencorev1beta1.ErrorCode) ConditionBuilder {
+ b.codes = codes
+ return b
+}
+
+// WithNowFunc sets the function used for getting the current time.
+// Should only be used for tests.
+func (b *defaultConditionBuilder) WithNowFunc(now func() metav1.Time) ConditionBuilder {
+ b.nowFunc = now
+ return b
+}
+
+// Build creates the condition and returns whether it was modified compared to the old condition.
+// If an old condition is provided:
+// - Any change to the status sets `LastTransitionTime` to the current time.
+// - Any change to the reason or the message sets `LastUpdateTime` to the current time.
+func (b *defaultConditionBuilder) Build() (new gardencorev1beta1.Condition, updated bool) {
+ var (
+ now = b.nowFunc()
+ emptyTime = metav1.Time{}
+ )
+
+ new = *b.old.DeepCopy()
+
+ if new.LastTransitionTime == emptyTime {
+ new.LastTransitionTime = now
+ }
+
+ if new.LastUpdateTime == emptyTime {
+ new.LastUpdateTime = now
+ }
+
+ new.Type = b.conditionType
+
+ if b.status != "" {
+ new.Status = b.status
+ } else if b.status == "" && b.old.Status == "" {
+ new.Status = gardencorev1beta1.ConditionUnknown
+ }
+
+ if b.reason != "" {
+ new.Reason = b.reason
+ } else if b.reason == "" && b.old.Reason == "" {
+ new.Reason = "ConditionInitialized"
+ }
+
+ if b.message != "" {
+ new.Message = b.message
+ } else if b.message == "" && b.old.Message == "" {
+ new.Message = "The condition has been initialized but its semantic check has not been performed yet."
+ }
+
+ if b.codes != nil {
+ new.Codes = b.codes
+ } else if b.codes == nil && b.old.Codes == nil {
+ new.Codes = nil
+ }
+
+ if new.Status != b.old.Status {
+ new.LastTransitionTime = now
+ }
+
+ if new.Reason != b.old.Reason || new.Message != b.old.Message {
+ new.LastUpdateTime = now
+ }
+
+ return new, !apiequality.Semantic.DeepEqual(new, b.old)
+}
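Usage sketch (editor's addition, not part of the vendored file): a controller maintaining a hypothetical "ExampleReady" condition could drive the builder above as follows; the package name and the condition type are invented.

package example

import (
	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	"github.com/gardener/gardener/pkg/apis/core/v1beta1/helper"
)

// updateReadyCondition recomputes the hypothetical "ExampleReady" condition and
// reports whether it changed compared to the previous one.
func updateReadyCondition(old gardencorev1beta1.Condition) (gardencorev1beta1.Condition, bool) {
	builder, err := helper.NewConditionBuilder("ExampleReady")
	if err != nil {
		// Only happens for an empty condition type; keep the old condition untouched.
		return old, false
	}
	return builder.
		WithOldCondition(old).
		WithStatus(gardencorev1beta1.ConditionTrue).
		WithReason("ChecksPassed").
		WithMessage("All semantic checks succeeded.").
		Build()
}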
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go
new file mode 100644
index 0000000..2694936
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go
@@ -0,0 +1,206 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helper
+
+import (
+ "errors"
+ "regexp"
+ "strings"
+ "time"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ utilerrors "github.com/gardener/gardener/pkg/utils/errors"
+
+ errors2 "github.com/pkg/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// ErrorWithCodes contains error codes and an error message.
+type ErrorWithCodes struct {
+ message string
+ codes []gardencorev1beta1.ErrorCode
+}
+
+// NewErrorWithCodes creates a new error that additionally exposes the given codes via the Coder interface.
+func NewErrorWithCodes(message string, codes ...gardencorev1beta1.ErrorCode) error {
+ return &ErrorWithCodes{message, codes}
+}
+
+// Codes returns all error codes.
+func (e *ErrorWithCodes) Codes() []gardencorev1beta1.ErrorCode {
+ return e.codes
+}
+
+// Error returns the error message.
+func (e *ErrorWithCodes) Error() string {
+ return e.message
+}
+
+var (
+ unauthorizedRegexp = regexp.MustCompile(`(?i)(Unauthorized|InvalidClientTokenId|InvalidAuthenticationTokenTenant|SignatureDoesNotMatch|Authentication failed|AuthFailure|AuthorizationFailed|invalid character|invalid_grant|invalid_client|Authorization Profile was not found|cannot fetch token|no active subscriptions|InvalidAccessKeyId|InvalidSecretAccessKey|query returned no results|UnauthorizedOperation|not authorized)`)
+ quotaExceededRegexp = regexp.MustCompile(`(?i)(LimitExceeded|Quota|Throttling|Too many requests)`)
+ insufficientPrivilegesRegexp = regexp.MustCompile(`(?i)(AccessDenied|OperationNotAllowed|Error 403)`)
+ dependenciesRegexp = regexp.MustCompile(`(?i)(PendingVerification|Access Not Configured|accessNotConfigured|DependencyViolation|OptInRequired|DeleteConflict|Conflict|inactive billing state|ReadOnlyDisabledSubscription|is already being used|InUseSubnetCannotBeDeleted|VnetInUse|InUseRouteTableCannotBeDeleted|timeout while waiting for state to become|InvalidCidrBlock|already busy for|InsufficientFreeAddressesInSubnet|InternalServerError|RetryableError|Future#WaitForCompletion: context has been cancelled|internalerror|internal server error|A resource with the ID|VnetAddressSpaceCannotChangeDueToPeerings)`)
+ resourcesDepletedRegexp = regexp.MustCompile(`(?i)(not available in the current hardware cluster|InsufficientInstanceCapacity|SkuNotAvailable|ZonalAllocationFailed|out of stock)`)
+ configurationProblemRegexp = regexp.MustCompile(`(?i)(AzureBastionSubnet|not supported in your requested Availability Zone|InvalidParameter|InvalidParameterValue|not found|notFound|NetcfgInvalidSubnet|InvalidSubnet|Invalid value|KubeletHasInsufficientMemory|KubeletHasDiskPressure|KubeletHasInsufficientPID|violates constraint|no attached internet gateway found|Your query returned no results|PrivateEndpointNetworkPoliciesCannotBeEnabledOnPrivateEndpointSubnet|invalid VPC attributes|PrivateLinkServiceNetworkPoliciesCannotBeEnabledOnPrivateLinkServiceSubnet|unrecognized feature gate|runtime-config invalid key)`)
+)
+
+// DetermineError determines the Garden error code for the given error and creates a new error with the given message.
+func DetermineError(err error, message string) error {
+ if err == nil {
+ return errors.New(message)
+ }
+
+ errMsg := message
+ if errMsg == "" {
+ errMsg = err.Error()
+ }
+
+ codes := DetermineErrorCodes(err)
+ if codes == nil {
+ return errors.New(errMsg)
+ }
+ return &ErrorWithCodes{errMsg, codes}
+}
+
+// DetermineErrorCodes determines error codes based on the given error.
+func DetermineErrorCodes(err error) []gardencorev1beta1.ErrorCode {
+ var (
+ coder Coder
+ message = err.Error()
+ codes = sets.NewString()
+ )
+
+ // try to re-use codes from error
+ if errors.As(err, &coder) {
+ for _, code := range coder.Codes() {
+ codes.Insert(string(code))
+ }
+ }
+
+ // determine error codes
+ if unauthorizedRegexp.MatchString(message) {
+ codes.Insert(string(gardencorev1beta1.ErrorInfraUnauthorized))
+ }
+ if quotaExceededRegexp.MatchString(message) {
+ codes.Insert(string(gardencorev1beta1.ErrorInfraQuotaExceeded))
+ }
+ if insufficientPrivilegesRegexp.MatchString(message) {
+ codes.Insert(string(gardencorev1beta1.ErrorInfraInsufficientPrivileges))
+ }
+ if dependenciesRegexp.MatchString(message) {
+ codes.Insert(string(gardencorev1beta1.ErrorInfraDependencies))
+ }
+ if resourcesDepletedRegexp.MatchString(message) {
+ codes.Insert(string(gardencorev1beta1.ErrorInfraResourcesDepleted))
+ }
+ if configurationProblemRegexp.MatchString(message) {
+ codes.Insert(string(gardencorev1beta1.ErrorConfigurationProblem))
+ }
+
+ // compute error code list based on code string set
+ var out []gardencorev1beta1.ErrorCode
+ for _, c := range codes.List() {
+ out = append(out, gardencorev1beta1.ErrorCode(c))
+ }
+ return out
+}
+
+// Coder is an error that may produce ErrorCodes visible to the outside.
+type Coder interface {
+ error
+ Codes() []gardencorev1beta1.ErrorCode
+}
+
+// ExtractErrorCodes extracts all error codes from the given error by using utilerrors.Errors
+func ExtractErrorCodes(err error) []gardencorev1beta1.ErrorCode {
+ var codes []gardencorev1beta1.ErrorCode
+ for _, err := range utilerrors.Errors(err) {
+ var coder Coder
+ if errors.As(err, &coder) {
+ codes = append(codes, coder.Codes()...)
+ }
+ }
+ return codes
+}
+
+// FormatLastErrDescription formats the error message string for the last occurred error.
+func FormatLastErrDescription(err error) string {
+ errString := err.Error()
+ if len(errString) > 0 {
+ errString = strings.ToUpper(string(errString[0])) + errString[1:]
+ }
+ return errString
+}
+
+// WrappedLastErrors is a structure which contains the general description of the lastErrors which occurred and an array of all lastErrors
+type WrappedLastErrors struct {
+ Description string
+ LastErrors []gardencorev1beta1.LastError
+}
+
+// NewWrappedLastErrors returns a WrappedLastErrors with the given description and the LastErrors extracted from the given error.
+func NewWrappedLastErrors(description string, err error) *WrappedLastErrors {
+ var lastErrors []gardencorev1beta1.LastError
+
+ for _, partError := range utilerrors.Errors(err) {
+ lastErrors = append(lastErrors, *LastErrorWithTaskID(
+ partError.Error(),
+ utilerrors.GetID(partError),
+ ExtractErrorCodes(errors2.Cause(partError))...))
+ }
+
+ return &WrappedLastErrors{
+ Description: description,
+ LastErrors: lastErrors,
+ }
+}
+
+// LastError creates a new LastError with the given description and optional codes, and sets the timestamp of when the error was last observed.
+func LastError(description string, codes ...gardencorev1beta1.ErrorCode) *gardencorev1beta1.LastError {
+ return &gardencorev1beta1.LastError{
+ Description: description,
+ Codes: codes,
+ LastUpdateTime: &metav1.Time{
+ Time: time.Now(),
+ },
+ }
+}
+
+// LastErrorWithTaskID creates a new LastError with the given description, the ID of the task during which the error occurred, and optional codes, and sets the timestamp of when the error was last observed.
+func LastErrorWithTaskID(description string, taskID string, codes ...gardencorev1beta1.ErrorCode) *gardencorev1beta1.LastError {
+ return &gardencorev1beta1.LastError{
+ Description: description,
+ Codes: codes,
+ TaskID: &taskID,
+ LastUpdateTime: &metav1.Time{
+ Time: time.Now(),
+ },
+ }
+}
+
+// HasNonRetryableErrorCode returns true if at least one of the given last errors has at least one error code that
+// indicates that an automatic retry would not help fixing the problem.
+func HasNonRetryableErrorCode(lastErrors ...gardencorev1beta1.LastError) bool {
+ for _, lastError := range lastErrors {
+ for _, code := range lastError.Codes {
+ if code == gardencorev1beta1.ErrorInfraUnauthorized || code == gardencorev1beta1.ErrorConfigurationProblem {
+ return true
+ }
+ }
+ }
+ return false
+}
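Usage sketch (editor's addition, not part of the vendored file): the helpers above can be combined to classify an infrastructure error and decide whether retrying makes sense; the package name and the message wording are invented.

package example

import (
	"fmt"

	"github.com/gardener/gardener/pkg/apis/core/v1beta1/helper"
)

// classifyInfraError maps an infrastructure error to error codes via the regular
// expressions above and checks whether a retry would be pointless.
func classifyInfraError(infraErr error) {
	codes := helper.DetermineErrorCodes(infraErr)
	lastError := helper.LastError("creating infrastructure failed: "+infraErr.Error(), codes...)

	if helper.HasNonRetryableErrorCode(*lastError) {
		fmt.Println("giving up, error is not retryable:", lastError.Description)
		return
	}
	fmt.Println("will retry:", lastError.Description)
}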
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go
new file mode 100644
index 0000000..7849607
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go
@@ -0,0 +1,1198 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helper
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/utils/pointer"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ versionutils "github.com/gardener/gardener/pkg/utils/version"
+
+ "github.com/Masterminds/semver"
+ "github.com/pkg/errors"
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// Now determines the current metav1.Time.
+var Now = metav1.Now
+
+// InitCondition initializes a new Condition with an Unknown status.
+func InitCondition(conditionType gardencorev1beta1.ConditionType) gardencorev1beta1.Condition {
+ now := Now()
+ return gardencorev1beta1.Condition{
+ Type: conditionType,
+ Status: gardencorev1beta1.ConditionUnknown,
+ Reason: "ConditionInitialized",
+ Message: "The condition has been initialized but its semantic check has not been performed yet.",
+ LastTransitionTime: now,
+ LastUpdateTime: now,
+ }
+}
+
+// GetCondition returns the condition with the given <conditionType> out of the list of <conditions>.
+// In case the required type could not be found, it returns nil.
+func GetCondition(conditions []gardencorev1beta1.Condition, conditionType gardencorev1beta1.ConditionType) *gardencorev1beta1.Condition {
+ for _, condition := range conditions {
+ if condition.Type == conditionType {
+ c := condition
+ return &c
+ }
+ }
+ return nil
+}
+
+// GetOrInitCondition tries to retrieve the condition with the given condition type from the given conditions.
+// If the condition could not be found, it returns an initialized condition of the given type.
+func GetOrInitCondition(conditions []gardencorev1beta1.Condition, conditionType gardencorev1beta1.ConditionType) gardencorev1beta1.Condition {
+ if condition := GetCondition(conditions, conditionType); condition != nil {
+ return *condition
+ }
+ return InitCondition(conditionType)
+}
+
+// UpdatedCondition updates the properties of one specific condition.
+func UpdatedCondition(condition gardencorev1beta1.Condition, status gardencorev1beta1.ConditionStatus, reason, message string, codes ...gardencorev1beta1.ErrorCode) gardencorev1beta1.Condition {
+ var (
+ newCondition = gardencorev1beta1.Condition{
+ Type: condition.Type,
+ Status: status,
+ Reason: reason,
+ Message: message,
+ LastTransitionTime: condition.LastTransitionTime,
+ LastUpdateTime: condition.LastUpdateTime,
+ Codes: codes,
+ }
+ now = Now()
+ )
+
+ if condition.Status != status {
+ newCondition.LastTransitionTime = now
+ }
+
+ if condition.Reason != reason || condition.Message != message || !apiequality.Semantic.DeepEqual(condition.Codes, codes) {
+ newCondition.LastUpdateTime = now
+ }
+
+ return newCondition
+}
+
+// UpdatedConditionUnknownError updates the condition to 'Unknown' status and the message of the given error.
+func UpdatedConditionUnknownError(condition gardencorev1beta1.Condition, err error, codes ...gardencorev1beta1.ErrorCode) gardencorev1beta1.Condition {
+ return UpdatedConditionUnknownErrorMessage(condition, err.Error(), codes...)
+}
+
+// UpdatedConditionUnknownErrorMessage updates the condition with 'Unknown' status and the given message.
+func UpdatedConditionUnknownErrorMessage(condition gardencorev1beta1.Condition, message string, codes ...gardencorev1beta1.ErrorCode) gardencorev1beta1.Condition {
+ return UpdatedCondition(condition, gardencorev1beta1.ConditionUnknown, gardencorev1beta1.ConditionCheckError, message, codes...)
+}
+
+// MergeConditions merges the given <oldConditions> with the <newConditions>. Existing conditions are superseded by
+// the <newConditions> (depending on the condition type).
+func MergeConditions(oldConditions []gardencorev1beta1.Condition, newConditions ...gardencorev1beta1.Condition) []gardencorev1beta1.Condition {
+ var (
+ out = make([]gardencorev1beta1.Condition, 0, len(oldConditions))
+ typeToIndex = make(map[gardencorev1beta1.ConditionType]int, len(oldConditions))
+ )
+
+ for i, condition := range oldConditions {
+ out = append(out, condition)
+ typeToIndex[condition.Type] = i
+ }
+
+ for _, condition := range newConditions {
+ if index, ok := typeToIndex[condition.Type]; ok {
+ out[index] = condition
+ continue
+ }
+ out = append(out, condition)
+ }
+
+ return out
+}
+
+// ConditionsNeedUpdate returns true if the <existingConditions> must be updated based on the <newConditions>.
+func ConditionsNeedUpdate(existingConditions, newConditions []gardencorev1beta1.Condition) bool {
+ return existingConditions == nil || !apiequality.Semantic.DeepEqual(newConditions, existingConditions)
+}
+
+// IsResourceSupported returns true if a given combination of kind/type is part of a controller resources list.
+func IsResourceSupported(resources []gardencorev1beta1.ControllerResource, resourceKind, resourceType string) bool {
+ for _, resource := range resources {
+ if resource.Kind == resourceKind && strings.EqualFold(resource.Type, resourceType) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// IsControllerInstallationSuccessful returns true if a ControllerInstallation has been marked as "successfully"
+// installed.
+func IsControllerInstallationSuccessful(controllerInstallation gardencorev1beta1.ControllerInstallation) bool {
+ var (
+ installed bool
+ healthy bool
+ )
+
+ for _, condition := range controllerInstallation.Status.Conditions {
+ if condition.Type == gardencorev1beta1.ControllerInstallationInstalled && condition.Status == gardencorev1beta1.ConditionTrue {
+ installed = true
+ }
+ if condition.Type == gardencorev1beta1.ControllerInstallationHealthy && condition.Status == gardencorev1beta1.ConditionTrue {
+ healthy = true
+ }
+ }
+
+ return installed && healthy
+}
+
+// IsControllerInstallationRequired returns true if a ControllerInstallation has been marked as "required".
+func IsControllerInstallationRequired(controllerInstallation gardencorev1beta1.ControllerInstallation) bool {
+ for _, condition := range controllerInstallation.Status.Conditions {
+ if condition.Type == gardencorev1beta1.ControllerInstallationRequired && condition.Status == gardencorev1beta1.ConditionTrue {
+ return true
+ }
+ }
+ return false
+}
+
+// ComputeOperationType checks the <lastOperation> and determines whether it is a Create, Delete, Reconcile, Migrate or Restore operation.
+func ComputeOperationType(meta metav1.ObjectMeta, lastOperation *gardencorev1beta1.LastOperation) gardencorev1beta1.LastOperationType {
+ switch {
+ case meta.Annotations[v1beta1constants.GardenerOperation] == v1beta1constants.GardenerOperationMigrate:
+ return gardencorev1beta1.LastOperationTypeMigrate
+ case meta.DeletionTimestamp != nil:
+ return gardencorev1beta1.LastOperationTypeDelete
+ case lastOperation == nil:
+ return gardencorev1beta1.LastOperationTypeCreate
+ case lastOperation.Type == gardencorev1beta1.LastOperationTypeCreate && lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded:
+ return gardencorev1beta1.LastOperationTypeCreate
+ case lastOperation.Type == gardencorev1beta1.LastOperationTypeMigrate && lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded:
+ return gardencorev1beta1.LastOperationTypeMigrate
+ case lastOperation.Type == gardencorev1beta1.LastOperationTypeRestore && lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded:
+ return gardencorev1beta1.LastOperationTypeRestore
+ }
+ return gardencorev1beta1.LastOperationTypeReconcile
+}
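// Editor's note (illustrative, not part of the vendored file): with the switch
// above, an object carrying a deletion timestamp maps to a Delete operation,
// while an object that has never been reconciled maps to a Create operation:
//
//	deleting := metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{Time: time.Now()}}
//	ComputeOperationType(deleting, nil)            // LastOperationTypeDelete
//	ComputeOperationType(metav1.ObjectMeta{}, nil) // LastOperationTypeCreate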
+
+// TaintsHave returns true if the given key is part of the taints list.
+func TaintsHave(taints []gardencorev1beta1.SeedTaint, key string) bool {
+ for _, taint := range taints {
+ if taint.Key == key {
+ return true
+ }
+ }
+ return false
+}
+
+// TaintsAreTolerated returns true when all the given taints are tolerated by the given tolerations.
+func TaintsAreTolerated(taints []gardencorev1beta1.SeedTaint, tolerations []gardencorev1beta1.Toleration) bool {
+ if len(taints) == 0 {
+ return true
+ }
+ if len(taints) > len(tolerations) {
+ return false
+ }
+
+ tolerationKeyValues := make(map[string]string, len(tolerations))
+ for _, toleration := range tolerations {
+ v := ""
+ if toleration.Value != nil {
+ v = *toleration.Value
+ }
+ tolerationKeyValues[toleration.Key] = v
+ }
+
+ for _, taint := range taints {
+ tolerationValue, ok := tolerationKeyValues[taint.Key]
+ if !ok {
+ return false
+ }
+ if taint.Value != nil && *taint.Value != tolerationValue {
+ return false
+ }
+ }
+
+ return true
+}
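// Editor's note (illustrative, not part of the vendored file): a taint is
// tolerated when a toleration with the same key exists and, if the taint has a
// value, that value matches. The key below is arbitrary:
//
//	taints := []gardencorev1beta1.SeedTaint{{Key: "example.com/some-taint"}}
//	TaintsAreTolerated(taints, []gardencorev1beta1.Toleration{{Key: "example.com/some-taint"}}) // true
//	TaintsAreTolerated(taints, nil)                                                             // false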
+
+type ShootedSeed struct {
+ DisableDNS *bool
+ DisableCapacityReservation *bool
+ Protected *bool
+ Visible *bool
+ LoadBalancerServicesAnnotations map[string]string
+ MinimumVolumeSize *string
+ APIServer *ShootedSeedAPIServer
+ BlockCIDRs []string
+ ShootDefaults *gardencorev1beta1.ShootNetworks
+ Backup *gardencorev1beta1.SeedBackup
+ SeedProviderConfig *runtime.RawExtension
+ IngressController *gardencorev1beta1.IngressController
+ NoGardenlet bool
+ UseServiceAccountBootstrapping bool
+ WithSecretRef bool
+ FeatureGates map[string]bool
+ Resources *ShootedSeedResources
+}
+
+type ShootedSeedAPIServer struct {
+ Replicas *int32
+ Autoscaler *ShootedSeedAPIServerAutoscaler
+}
+
+type ShootedSeedAPIServerAutoscaler struct {
+ MinReplicas *int32
+ MaxReplicas int32
+}
+
+type ShootedSeedResources struct {
+ Capacity corev1.ResourceList
+ Reserved corev1.ResourceList
+}
+
+func parseInt32(s string) (int32, error) {
+ i64, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(i64), nil
+}
+
+func parseShootedSeed(annotation string) (*ShootedSeed, error) {
+ var (
+ flags = make(map[string]struct{})
+ settings = make(map[string]string)
+ )
+
+ for _, fragment := range strings.Split(annotation, ",") {
+ parts := strings.SplitN(fragment, "=", 2)
+ if len(parts) == 1 {
+ flags[fragment] = struct{}{}
+ continue
+ }
+
+ settings[parts[0]] = parts[1]
+ }
+
+ if _, ok := flags["true"]; !ok {
+ return nil, nil
+ }
+
+ shootedSeed := ShootedSeed{
+ LoadBalancerServicesAnnotations: parseShootedSeedLoadBalancerServicesAnnotations(settings),
+ FeatureGates: parseShootedSeedFeatureGates(settings),
+ }
+
+ apiServer, err := parseShootedSeedAPIServer(settings)
+ if err != nil {
+ return nil, err
+ }
+ shootedSeed.APIServer = apiServer
+
+ blockCIDRs, err := parseShootedSeedBlockCIDRs(settings)
+ if err != nil {
+ return nil, err
+ }
+ shootedSeed.BlockCIDRs = blockCIDRs
+
+ shootDefaults, err := parseShootedSeedShootDefaults(settings)
+ if err != nil {
+ return nil, err
+ }
+ shootedSeed.ShootDefaults = shootDefaults
+
+ backup, err := parseShootedSeedBackup(settings)
+ if err != nil {
+ return nil, err
+ }
+ shootedSeed.Backup = backup
+
+ seedProviderConfig, err := parseProviderConfig("providerConfig.", settings)
+ if err != nil {
+ return nil, err
+ }
+ shootedSeed.SeedProviderConfig = seedProviderConfig
+
+ resources, err := parseShootedSeedResources(settings)
+ if err != nil {
+ return nil, err
+ }
+ shootedSeed.Resources = resources
+
+ ingressController, err := parseIngressController(settings)
+ if err != nil {
+ return nil, err
+ }
+ shootedSeed.IngressController = ingressController
+
+ if size, ok := settings["minimumVolumeSize"]; ok {
+ shootedSeed.MinimumVolumeSize = &size
+ }
+ if _, ok := flags["disable-dns"]; ok {
+ shootedSeed.DisableDNS = pointer.BoolPtr(true)
+ }
+ if _, ok := flags["disable-capacity-reservation"]; ok {
+ shootedSeed.DisableCapacityReservation = pointer.BoolPtr(true)
+ }
+ if _, ok := flags["no-gardenlet"]; ok {
+ shootedSeed.NoGardenlet = true
+ }
+ if _, ok := flags["use-serviceaccount-bootstrapping"]; ok {
+ shootedSeed.UseServiceAccountBootstrapping = true
+ }
+ if _, ok := flags["with-secret-ref"]; ok {
+ shootedSeed.WithSecretRef = true
+ }
+ if _, ok := flags["protected"]; ok {
+ shootedSeed.Protected = pointer.BoolPtr(true)
+ }
+ if _, ok := flags["unprotected"]; ok {
+ shootedSeed.Protected = pointer.BoolPtr(false)
+ }
+ if _, ok := flags["visible"]; ok {
+ shootedSeed.Visible = pointer.BoolPtr(true)
+ }
+ if _, ok := flags["invisible"]; ok {
+ shootedSeed.Visible = pointer.BoolPtr(false)
+ }
+
+ return &shootedSeed, nil
+}
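// Editor's note (illustrative, not part of the vendored file): the annotation
// value parsed above is a comma-separated list of flags and key=value settings
// and must contain the "true" flag, e.g. (values invented):
//
//	true,apiServer.replicas=3,apiServer.autoscaler.maxReplicas=5,backup.provider=none,visible,featureGates.SomeFeature=true
//
// which yields three API server replicas, autoscaling up to five replicas, no
// backup section, Visible=true, and the "SomeFeature" feature gate enabled.
// Without the leading "true" flag, parseShootedSeed returns nil.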
+
+func parseShootedSeedBlockCIDRs(settings map[string]string) ([]string, error) {
+ cidrs, ok := settings["blockCIDRs"]
+ if !ok {
+ return nil, nil
+ }
+
+ return strings.Split(cidrs, ";"), nil
+}
+
+func parseShootedSeedShootDefaults(settings map[string]string) (*gardencorev1beta1.ShootNetworks, error) {
+ var (
+ podCIDR, ok1 = settings["shootDefaults.pods"]
+ serviceCIDR, ok2 = settings["shootDefaults.services"]
+ )
+
+ if !ok1 && !ok2 {
+ return nil, nil
+ }
+
+ shootNetworks := &gardencorev1beta1.ShootNetworks{}
+
+ if ok1 {
+ shootNetworks.Pods = &podCIDR
+ }
+
+ if ok2 {
+ shootNetworks.Services = &serviceCIDR
+ }
+
+ return shootNetworks, nil
+}
+
+func parseIngressController(settings map[string]string) (*gardencorev1beta1.IngressController, error) {
+ ingressController := &gardencorev1beta1.IngressController{}
+
+ kind, ok := settings["ingress.controller.kind"]
+ if !ok {
+ return nil, nil
+ }
+ ingressController.Kind = kind
+
+ parsedProviderConfig, err := parseProviderConfig("ingress.controller.providerConfig.", settings)
+ if err != nil {
+ return nil, fmt.Errorf("parsing Ingress providerConfig failed: %s", err.Error())
+ }
+ ingressController.ProviderConfig = parsedProviderConfig
+
+ return ingressController, nil
+}
+
+func parseShootedSeedBackup(settings map[string]string) (*gardencorev1beta1.SeedBackup, error) {
+ var (
+ provider, ok1 = settings["backup.provider"]
+ region, ok2 = settings["backup.region"]
+ secretRefName, ok3 = settings["backup.secretRef.name"]
+ secretRefNamespace, ok4 = settings["backup.secretRef.namespace"]
+ )
+
+ if ok1 && provider == "none" {
+ return nil, nil
+ }
+
+ backup := &gardencorev1beta1.SeedBackup{}
+
+ if ok1 {
+ backup.Provider = provider
+ }
+ if ok2 {
+		backup.Region = &region
+ }
+ if ok3 {
+ backup.SecretRef.Name = secretRefName
+ }
+ if ok4 {
+ backup.SecretRef.Namespace = secretRefNamespace
+ }
+
+ return backup, nil
+}
+
+func parseShootedSeedFeatureGates(settings map[string]string) map[string]bool {
+ featureGates := make(map[string]bool)
+
+ for k, v := range settings {
+ if strings.HasPrefix(k, "featureGates.") {
+ val, _ := strconv.ParseBool(v)
+ featureGates[strings.Split(k, ".")[1]] = val
+ }
+ }
+
+ if len(featureGates) == 0 {
+ return nil
+ }
+
+ return featureGates
+}
+
+func parseProviderConfig(prefix string, settings map[string]string) (*runtime.RawExtension, error) {
+ // reconstruct providerConfig structure
+ providerConfig := map[string]interface{}{}
+
+ var err error
+ for k, v := range settings {
+ if strings.HasPrefix(k, prefix) {
+ var value interface{}
+ if strings.HasPrefix(v, `"`) && strings.HasSuffix(v, `"`) {
+ value, err = strconv.Unquote(v)
+ if err != nil {
+ return nil, err
+ }
+ } else if b, err := strconv.ParseBool(v); err == nil {
+ value = b
+ } else if f, err := strconv.ParseFloat(v, 64); err == nil {
+ value = f
+ } else {
+ value = v
+ }
+
+ path := strings.TrimPrefix(k, prefix)
+ if err := unstructured.SetNestedField(providerConfig, value, strings.Split(path, ".")...); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if len(providerConfig) == 0 {
+ return nil, nil
+ }
+
+ jsonStr, err := json.Marshal(providerConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ return &runtime.RawExtension{
+ Raw: jsonStr,
+ }, nil
+}
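// Editor's note (illustrative, not part of the vendored file): with the prefix
// "providerConfig.", settings such as
//
//	providerConfig.networks.vpc.cidr="10.0.0.0/16"
//	providerConfig.zoned=true
//
// are reassembled into the nested document
// {"networks":{"vpc":{"cidr":"10.0.0.0/16"}},"zoned":true} and returned as a
// runtime.RawExtension.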
+
+func parseShootedSeedLoadBalancerServicesAnnotations(settings map[string]string) map[string]string {
+ const optionPrefix = "loadBalancerServices.annotations."
+
+ annotations := make(map[string]string)
+ for k, v := range settings {
+ if strings.HasPrefix(k, optionPrefix) {
+ annotationKey := strings.TrimPrefix(k, optionPrefix)
+ annotations[annotationKey] = v
+ }
+ }
+
+ if len(annotations) == 0 {
+ return nil
+ }
+
+ return annotations
+}
+
+func parseShootedSeedAPIServer(settings map[string]string) (*ShootedSeedAPIServer, error) {
+ apiServerAutoscaler, err := parseShootedSeedAPIServerAutoscaler(settings)
+ if err != nil {
+ return nil, err
+ }
+
+ replicasString, ok := settings["apiServer.replicas"]
+ if !ok && apiServerAutoscaler == nil {
+ return nil, nil
+ }
+
+ var apiServer ShootedSeedAPIServer
+
+ apiServer.Autoscaler = apiServerAutoscaler
+
+ if ok {
+ replicas, err := parseInt32(replicasString)
+ if err != nil {
+ return nil, err
+ }
+
+ apiServer.Replicas = &replicas
+ }
+
+ return &apiServer, nil
+}
+
+func parseShootedSeedAPIServerAutoscaler(settings map[string]string) (*ShootedSeedAPIServerAutoscaler, error) {
+ minReplicasString, ok1 := settings["apiServer.autoscaler.minReplicas"]
+ maxReplicasString, ok2 := settings["apiServer.autoscaler.maxReplicas"]
+ if !ok1 && !ok2 {
+ return nil, nil
+ }
+ if !ok2 {
+ return nil, fmt.Errorf("apiSrvMaxReplicas has to be specified for shooted seed API server autoscaler")
+ }
+
+ var apiServerAutoscaler ShootedSeedAPIServerAutoscaler
+
+ if ok1 {
+ minReplicas, err := parseInt32(minReplicasString)
+ if err != nil {
+ return nil, err
+ }
+ apiServerAutoscaler.MinReplicas = &minReplicas
+ }
+
+ maxReplicas, err := parseInt32(maxReplicasString)
+ if err != nil {
+ return nil, err
+ }
+ apiServerAutoscaler.MaxReplicas = maxReplicas
+
+ return &apiServerAutoscaler, nil
+}
+
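+// parseShootedSeedResources parses "resources.capacity.<name>" and "resources.reserved.<name>"
+// settings into resource lists, e.g. (illustrative) "resources.capacity.shoots" = "100" yields a
+// capacity of 100 for the resource name "shoots".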
+func parseShootedSeedResources(settings map[string]string) (*ShootedSeedResources, error) {
+ var capacity, reserved corev1.ResourceList
+
+ for k, v := range settings {
+ var resourceName corev1.ResourceName
+ var quantity resource.Quantity
+ var err error
+ if strings.HasPrefix(k, "resources.capacity.") || strings.HasPrefix(k, "resources.reserved.") {
+ resourceName = corev1.ResourceName(strings.Split(k, ".")[2])
+ quantity, err = resource.ParseQuantity(v)
+ if err != nil {
+ return nil, err
+ }
+ if strings.HasPrefix(k, "resources.capacity.") {
+ if capacity == nil {
+ capacity = make(corev1.ResourceList)
+ }
+ capacity[resourceName] = quantity
+ } else {
+ if reserved == nil {
+ reserved = make(corev1.ResourceList)
+ }
+ reserved[resourceName] = quantity
+ }
+ }
+ }
+
+ if len(capacity) == 0 && len(reserved) == 0 {
+ return nil, nil
+ }
+ return &ShootedSeedResources{
+ Capacity: capacity,
+ Reserved: reserved,
+ }, nil
+}
+
+func validateShootedSeed(shootedSeed *ShootedSeed, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if shootedSeed.APIServer != nil {
+ allErrs = append(allErrs, validateShootedSeedAPIServer(shootedSeed.APIServer, fldPath.Child("apiServer"))...)
+ }
+ if shootedSeed.Resources != nil {
+ allErrs = append(allErrs, validateShootedSeedResources(shootedSeed.Resources, fldPath.Child("resources"))...)
+ }
+
+ return allErrs
+}
+
+func validateShootedSeedAPIServer(apiServer *ShootedSeedAPIServer, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if apiServer.Replicas != nil && *apiServer.Replicas < 1 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("replicas"), *apiServer.Replicas, "must be greater than 0"))
+ }
+ if apiServer.Autoscaler != nil {
+ allErrs = append(allErrs, validateShootedSeedAPIServerAutoscaler(apiServer.Autoscaler, fldPath.Child("autoscaler"))...)
+ }
+
+ return allErrs
+}
+
+func validateShootedSeedAPIServerAutoscaler(autoscaler *ShootedSeedAPIServerAutoscaler, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if autoscaler.MinReplicas != nil && *autoscaler.MinReplicas < 1 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("minReplicas"), *autoscaler.MinReplicas, "must be greater than 0"))
+ }
+ if autoscaler.MaxReplicas < 1 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than 0"))
+ }
+ if autoscaler.MinReplicas != nil && autoscaler.MaxReplicas < *autoscaler.MinReplicas {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than or equal to `minReplicas`"))
+ }
+
+ return allErrs
+}
+
+func validateShootedSeedResources(resources *ShootedSeedResources, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ for resourceName, quantity := range resources.Capacity {
+ if reservedQuantity, ok := resources.Reserved[resourceName]; ok && reservedQuantity.Value() > quantity.Value() {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("reserved", string(resourceName)), resources.Reserved[resourceName], "must be lower or equal to capacity"))
+ }
+ }
+ for resourceName := range resources.Reserved {
+ if _, ok := resources.Capacity[resourceName]; !ok {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("reserved", string(resourceName)), resources.Reserved[resourceName], "reserved without capacity"))
+ }
+ }
+
+ return allErrs
+}
+
+func setDefaults_ShootedSeed(shootedSeed *ShootedSeed) {
+ if shootedSeed.APIServer == nil {
+ shootedSeed.APIServer = &ShootedSeedAPIServer{}
+ }
+ setDefaults_ShootedSeedAPIServer(shootedSeed.APIServer)
+ if shootedSeed.Resources == nil {
+ shootedSeed.Resources = &ShootedSeedResources{}
+ }
+ setDefaults_ShootedSeedResources(shootedSeed.Resources)
+}
+
+func setDefaults_ShootedSeedAPIServer(apiServer *ShootedSeedAPIServer) {
+ if apiServer.Replicas == nil {
+ three := int32(3)
+ apiServer.Replicas = &three
+ }
+ if apiServer.Autoscaler == nil {
+ apiServer.Autoscaler = &ShootedSeedAPIServerAutoscaler{
+ MaxReplicas: 3,
+ }
+ }
+ setDefaults_ShootedSeedAPIServerAutoscaler(apiServer.Autoscaler)
+}
+
+func minInt32(a int32, b int32) int32 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func setDefaults_ShootedSeedAPIServerAutoscaler(autoscaler *ShootedSeedAPIServerAutoscaler) {
+ if autoscaler.MinReplicas == nil {
+ minReplicas := minInt32(3, autoscaler.MaxReplicas)
+ autoscaler.MinReplicas = &minReplicas
+ }
+}
+
+func setDefaults_ShootedSeedResources(resources *ShootedSeedResources) {
+ if _, ok := resources.Capacity[gardencorev1beta1.ResourceShoots]; !ok {
+ if resources.Capacity == nil {
+ resources.Capacity = make(corev1.ResourceList)
+ }
+ resources.Capacity[gardencorev1beta1.ResourceShoots] = resource.MustParse("250")
+ }
+}
+
+// ReadShootedSeed determines whether the Shoot has been marked to be registered automatically as a Seed cluster.
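+// The annotation value (v1beta1constants.AnnotationShootUseAsSeed) is parsed by parseShootedSeed;
+// illustrative values are "true" or a comma-separated list of settings such as
+// "true,apiServer.autoscaler.maxReplicas=6" (the exact grammar is defined by parseShootedSeed).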
+func ReadShootedSeed(shoot *gardencorev1beta1.Shoot) (*ShootedSeed, error) {
+ if shoot.Namespace != v1beta1constants.GardenNamespace || shoot.Annotations == nil {
+ return nil, nil
+ }
+
+ val, ok := shoot.Annotations[v1beta1constants.AnnotationShootUseAsSeed]
+ if !ok {
+ return nil, nil
+ }
+
+ shootedSeed, err := parseShootedSeed(val)
+ if err != nil {
+ return nil, err
+ }
+
+ if shootedSeed == nil {
+ return nil, nil
+ }
+
+ setDefaults_ShootedSeed(shootedSeed)
+
+ if errs := validateShootedSeed(shootedSeed, nil); len(errs) > 0 {
+ return nil, errs.ToAggregate()
+ }
+
+ return shootedSeed, nil
+}
+
+// HibernationIsEnabled checks if the given shoot's desired state is hibernated.
+func HibernationIsEnabled(shoot *gardencorev1beta1.Shoot) bool {
+ return shoot.Spec.Hibernation != nil && shoot.Spec.Hibernation.Enabled != nil && *shoot.Spec.Hibernation.Enabled
+}
+
+// ShootWantsClusterAutoscaler checks if the given Shoot needs a cluster autoscaler.
+// This is determined by checking whether at least one of the Shoot workers
+// has a Maximum greater than its Minimum.
+func ShootWantsClusterAutoscaler(shoot *gardencorev1beta1.Shoot) (bool, error) {
+ for _, worker := range shoot.Spec.Provider.Workers {
+ if worker.Maximum > worker.Minimum {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// ShootWantsVerticalPodAutoscaler checks if the given Shoot needs a VPA.
+func ShootWantsVerticalPodAutoscaler(shoot *gardencorev1beta1.Shoot) bool {
+ return shoot.Spec.Kubernetes.VerticalPodAutoscaler != nil && shoot.Spec.Kubernetes.VerticalPodAutoscaler.Enabled
+}
+
+// ShootIgnoresAlerts checks if the alerts for the annotated shoot cluster should be ignored.
+func ShootIgnoresAlerts(shoot *gardencorev1beta1.Shoot) bool {
+ ignore := false
+ if value, ok := shoot.Annotations[v1beta1constants.AnnotationShootIgnoreAlerts]; ok {
+ ignore, _ = strconv.ParseBool(value)
+ }
+ return ignore
+}
+
+// ShootWantsAlertManager checks if the given shoot specification requires an alert manager.
+func ShootWantsAlertManager(shoot *gardencorev1beta1.Shoot) bool {
+ return !ShootIgnoresAlerts(shoot) && shoot.Spec.Monitoring != nil && shoot.Spec.Monitoring.Alerting != nil && len(shoot.Spec.Monitoring.Alerting.EmailReceivers) > 0
+}
+
+// ShootWantsBasicAuthentication returns true if basic authentication is not configured or
+// if it is set explicitly to 'true'.
+func ShootWantsBasicAuthentication(shoot *gardencorev1beta1.Shoot) bool {
+ kubeAPIServerConfig := shoot.Spec.Kubernetes.KubeAPIServer
+ if kubeAPIServerConfig == nil {
+ return true
+ }
+ if kubeAPIServerConfig.EnableBasicAuthentication == nil {
+ return true
+ }
+ return *kubeAPIServerConfig.EnableBasicAuthentication
+}
+
+// ShootUsesUnmanagedDNS returns true if the shoot's DNS section is marked as 'unmanaged'.
+func ShootUsesUnmanagedDNS(shoot *gardencorev1beta1.Shoot) bool {
+ return shoot.Spec.DNS != nil && len(shoot.Spec.DNS.Providers) > 0 && shoot.Spec.DNS.Providers[0].Type != nil && *shoot.Spec.DNS.Providers[0].Type == "unmanaged"
+}
+
+// DetermineMachineImageForName finds the cloud-specific machine image with the given name in the given CloudProfile.
+// In case it does not find a machine image with that name, it returns false. Otherwise, it returns true and the
+// cloud-specific machine image.
+func DetermineMachineImageForName(cloudProfile *gardencorev1beta1.CloudProfile, name string) (bool, gardencorev1beta1.MachineImage, error) {
+ for _, image := range cloudProfile.Spec.MachineImages {
+ if strings.EqualFold(image.Name, name) {
+ return true, image, nil
+ }
+ }
+ return false, gardencorev1beta1.MachineImage{}, nil
+}
+
+// ShootMachineImageVersionExists checks if the shoot machine image (name, version) exists in the machine image constraint.
+// If it does, it returns true and the index of the version in the versions slice.
+func ShootMachineImageVersionExists(constraint gardencorev1beta1.MachineImage, image gardencorev1beta1.ShootMachineImage) (bool, int) {
+ if constraint.Name != image.Name {
+ return false, 0
+ }
+
+ for index, v := range constraint.Versions {
+ if image.Version != nil && v.Version == *image.Version {
+ return true, index
+ }
+ }
+
+ return false, 0
+}
+
+func toExpirableVersions(versions []gardencorev1beta1.MachineImageVersion) []gardencorev1beta1.ExpirableVersion {
+ expVersions := []gardencorev1beta1.ExpirableVersion{}
+ for _, version := range versions {
+ expVersions = append(expVersions, version.ExpirableVersion)
+ }
+ return expVersions
+}
+
+// GetLatestQualifyingShootMachineImage determines the latest qualifying version in a machine image and returns it as a ShootMachineImage.
+// A version qualifies if its classification is not preview and the version is not expired.
+func GetLatestQualifyingShootMachineImage(image gardencorev1beta1.MachineImage, predicates ...VersionPredicate) (bool, *gardencorev1beta1.ShootMachineImage, error) {
+ predicates = append(predicates, FilterExpiredVersion())
+ qualifyingVersionFound, latestImageVersion, err := GetLatestQualifyingVersion(toExpirableVersions(image.Versions), predicates...)
+ if err != nil {
+ return false, nil, err
+ }
+ if !qualifyingVersionFound {
+ return false, nil, nil
+ }
+ return true, &gardencorev1beta1.ShootMachineImage{Name: image.Name, Version: &latestImageVersion.Version}, nil
+}
+
+// SystemComponentsAllowed checks if the given worker allows system components to be scheduled onto it
+func SystemComponentsAllowed(worker *gardencorev1beta1.Worker) bool {
+ return worker.SystemComponents == nil || worker.SystemComponents.Allow
+}
+
+// UpdateMachineImages updates the machine images in place.
+func UpdateMachineImages(workers []gardencorev1beta1.Worker, machineImages []*gardencorev1beta1.ShootMachineImage) {
+ for _, machineImage := range machineImages {
+ for idx, worker := range workers {
+ if worker.Machine.Image != nil && machineImage.Name == worker.Machine.Image.Name {
+ workers[idx].Machine.Image = machineImage
+ }
+ }
+ }
+}
+
+// KubernetesVersionExistsInCloudProfile checks if the given Kubernetes version exists in the CloudProfile
+func KubernetesVersionExistsInCloudProfile(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (bool, gardencorev1beta1.ExpirableVersion, error) {
+ for _, version := range cloudProfile.Spec.Kubernetes.Versions {
+ ok, err := versionutils.CompareVersions(version.Version, "=", currentVersion)
+ if err != nil {
+ return false, gardencorev1beta1.ExpirableVersion{}, err
+ }
+ if ok {
+ return true, version, nil
+ }
+ }
+ return false, gardencorev1beta1.ExpirableVersion{}, nil
+}
+
+// SetMachineImageVersionsToMachineImage sets the given imageVersions on the machine image with the matching imageName in machineImages.
+func SetMachineImageVersionsToMachineImage(machineImages []gardencorev1beta1.MachineImage, imageName string, imageVersions []gardencorev1beta1.MachineImageVersion) ([]gardencorev1beta1.MachineImage, error) {
+ for index, image := range machineImages {
+ if strings.EqualFold(image.Name, imageName) {
+ machineImages[index].Versions = imageVersions
+ return machineImages, nil
+ }
+ }
+ return nil, fmt.Errorf("machine image with name '%s' could not be found", imageName)
+}
+
+// GetDefaultMachineImageFromCloudProfile gets the first MachineImage from the CloudProfile
+func GetDefaultMachineImageFromCloudProfile(profile gardencorev1beta1.CloudProfile) *gardencorev1beta1.MachineImage {
+ if len(profile.Spec.MachineImages) == 0 {
+ return nil
+ }
+ return &profile.Spec.MachineImages[0]
+}
+
+// WrapWithLastError wraps the given error with the description of the given gardencorev1beta1.LastError.
+func WrapWithLastError(err error, lastError *gardencorev1beta1.LastError) error {
+ if err == nil || lastError == nil {
+ return err
+ }
+ return errors.Wrapf(err, "last error: %s", lastError.Description)
+}
+
+// IsAPIServerExposureManaged returns true, if the Object is managed by Gardener for API server exposure.
+// This indicates to extensions that they should not mutate the object.
+// Gardener marks the kube-apiserver Service and Deployment as managed by it when it uses SNI to expose them.
+func IsAPIServerExposureManaged(obj metav1.Object) bool {
+ if obj == nil {
+ return false
+ }
+
+ if v, found := obj.GetLabels()[v1beta1constants.LabelAPIServerExposure]; found &&
+ v == v1beta1constants.LabelAPIServerExposureGardenerManaged {
+ return true
+ }
+
+ return false
+}
+
+// FindPrimaryDNSProvider finds the primary provider among the given `providers`.
+// It returns nil if no primary provider is available, or the first candidate if multiple primary providers are found.
+func FindPrimaryDNSProvider(providers []gardencorev1beta1.DNSProvider) *gardencorev1beta1.DNSProvider {
+ for _, provider := range providers {
+ if provider.Primary != nil && *provider.Primary {
+ primaryProvider := provider
+ return &primaryProvider
+ }
+ }
+ return nil
+}
+
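+// VersionPredicate is a function that decides whether an ExpirableVersion is filtered out (true)
+// or kept (false) when determining the latest qualifying version.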
+type VersionPredicate func(expirableVersion gardencorev1beta1.ExpirableVersion, version *semver.Version) (bool, error)
+
+// GetKubernetesVersionForPatchUpdate finds the latest Kubernetes patch version in the CloudProfile for the
+// major.minor version of the given currentVersion. Preview and expired versions do not qualify for the
+// Kubernetes patch update. In case it does not find a newer patch version, it returns false. Otherwise,
+// it returns true and the found version.
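+// Example (illustrative): for currentVersion "1.18.2" and non-expired, non-preview versions
+// "1.18.4" and "1.19.1" in the CloudProfile, it returns (true, "1.18.4", nil).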
+func GetKubernetesVersionForPatchUpdate(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (bool, string, error) {
+ currentSemVerVersion, err := semver.NewVersion(currentVersion)
+ if err != nil {
+ return false, "", err
+ }
+
+ qualifyingVersionFound, latestVersion, err := GetLatestQualifyingVersion(cloudProfile.Spec.Kubernetes.Versions, FilterDifferentMajorMinorVersion(*currentSemVerVersion), FilterSameVersion(*currentSemVerVersion), FilterExpiredVersion())
+ if err != nil {
+ return false, "", err
+ }
+ // latest version cannot be found. Do not return an error, but allow for minor upgrade if Shoot's machine image version is expired.
+ if !qualifyingVersionFound {
+ return false, "", nil
+ }
+
+ return true, latestVersion.Version, nil
+}
+
+// GetKubernetesVersionForMinorUpdate finds a Kubernetes version in the CloudProfile that qualifies for a Kubernetes
+// minor level update given the currentVersion. A qualifying version is a non-preview version whose minor version is
+// increased by exactly one compared to the currentVersion.
+// In case the consecutive minor version has only expired versions, it picks the latest expired version (another minor
+// update will be performed during the next maintenance time).
+// If a qualifying version can be found, it returns true and the qualifying patch version of the next minor version.
+// Otherwise, it returns false.
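+// Example (illustrative): for currentVersion "1.18.2" and non-expired, non-preview versions
+// "1.19.4" and "1.20.1" in the CloudProfile, it returns (true, "1.19.4", nil).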
+func GetKubernetesVersionForMinorUpdate(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (bool, string, error) {
+ currentSemVerVersion, err := semver.NewVersion(currentVersion)
+ if err != nil {
+ return false, "", err
+ }
+
+ qualifyingVersionFound, latestVersion, err := GetLatestQualifyingVersion(cloudProfile.Spec.Kubernetes.Versions, FilterNonConsecutiveMinorVersion(*currentSemVerVersion), FilterSameVersion(*currentSemVerVersion), FilterExpiredVersion())
+ if err != nil {
+ return false, "", err
+ }
+ if !qualifyingVersionFound {
+ // in case there are only expired versions in the consecutive minor version, pick the latest expired version
+ qualifyingVersionFound, latestVersion, err = GetLatestQualifyingVersion(cloudProfile.Spec.Kubernetes.Versions, FilterNonConsecutiveMinorVersion(*currentSemVerVersion), FilterSameVersion(*currentSemVerVersion))
+ if err != nil {
+ return false, "", err
+ }
+ if !qualifyingVersionFound {
+ return false, "", nil
+ }
+ }
+
+ return true, latestVersion.Version, nil
+}
+
+// GetLatestQualifyingVersion returns the latest expirable version from a set of expirable versions.
+// A version qualifies if its classification is not preview and none of the optional predicates filters it out
+// (a predicate that returns true excludes the version from consideration).
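+// For example (illustrative), GetLatestQualifyingVersion(versions, FilterExpiredVersion())
+// returns the highest non-preview, non-expired version in versions.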
+func GetLatestQualifyingVersion(versions []gardencorev1beta1.ExpirableVersion, predicate ...VersionPredicate) (qualifyingVersionFound bool, latest *gardencorev1beta1.ExpirableVersion, err error) {
+ latestSemanticVersion := &semver.Version{}
+ var latestVersion *gardencorev1beta1.ExpirableVersion
+OUTER:
+ for _, v := range versions {
+ if v.Classification != nil && *v.Classification == gardencorev1beta1.ClassificationPreview {
+ continue
+ }
+
+ semver, err := semver.NewVersion(v.Version)
+ if err != nil {
+ return false, nil, fmt.Errorf("error while parsing version '%s': %s", v.Version, err.Error())
+ }
+
+ for _, p := range predicate {
+ if p == nil {
+ continue
+ }
+
+ shouldFilter, err := p(v, semver)
+ if err != nil {
+ return false, nil, fmt.Errorf("error while evaluating predicate: '%s'", err.Error())
+ }
+ if shouldFilter {
+ continue OUTER
+ }
+ }
+
+ if semver.GreaterThan(latestSemanticVersion) {
+ latestSemanticVersion = semver
+ // avoid DeepCopy
+ latest := v
+ latestVersion = &latest
+ }
+ }
+ // unable to find qualified versions
+ if latestVersion == nil {
+ return false, nil, nil
+ }
+ return true, latestVersion, nil
+}
+
+// FilterDifferentMajorMinorVersion returns a VersionPredicate (closure) that evaluates whether a given version v has a
+// different major.minor version compared to the currentSemVerVersion. It returns true if v has a different major.minor version.
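+// Note that the comparison uses a tilde range, so versions with the same major.minor but lower
+// than currentSemVerVersion are filtered out as well (illustrative: for currentSemVerVersion
+// "1.18.2", "1.18.5" is kept while "1.19.0" and "1.18.1" are filtered).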
+func FilterDifferentMajorMinorVersion(currentSemVerVersion semver.Version) VersionPredicate {
+ return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) {
+ isWithinRange, err := versionutils.CompareVersions(v.String(), "~", currentSemVerVersion.String())
+ if err != nil {
+ return true, err
+ }
+ return !isWithinRange, nil
+ }
+}
+
+// FilterNonConsecutiveMinorVersion returns a VersionPredicate (closure) that evaluates whether a given version v has a
+// minor version consecutive to the currentSemVerVersion. It returns true if v does not have a consecutive minor version.
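+// Illustrative: for currentSemVerVersion "1.18.2", "1.19.0" is kept while "1.18.5", "1.20.0" and
+// "2.0.0" are filtered.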
+func FilterNonConsecutiveMinorVersion(currentSemVerVersion semver.Version) VersionPredicate {
+ return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) {
+ isWithinRange, err := versionutils.CompareVersions(v.String(), "^", currentSemVerVersion.String())
+ if err != nil {
+ return true, err
+ }
+
+ if !isWithinRange {
+ return true, nil
+ }
+
+ hasIncorrectMinor := currentSemVerVersion.Minor()+1 != v.Minor()
+ return hasIncorrectMinor, nil
+ }
+}
+
+// FilterSameVersion returns a VersionPredicate (closure) that evaluates whether a given version v is equal to the currentSemVerVersion.
+// It returns true if it is equal.
+func FilterSameVersion(currentSemVerVersion semver.Version) VersionPredicate {
+ return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) {
+ return v.Equal(&currentSemVerVersion), nil
+ }
+}
+
+// FilterLowerVersion returns a VersionPredicate (closure) that evaluates whether a given version v is lower than the currentSemVerVersion.
+// It returns true if it is lower.
+func FilterLowerVersion(currentSemVerVersion semver.Version) VersionPredicate {
+ return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) {
+ return v.LessThan(&currentSemVerVersion), nil
+ }
+}
+
+// FilterExpiredVersion returns a closure that evaluates whether a given expirable version is expired.
+// It returns true if it is expired.
+func FilterExpiredVersion() func(expirableVersion gardencorev1beta1.ExpirableVersion, version *semver.Version) (bool, error) {
+ return func(expirableVersion gardencorev1beta1.ExpirableVersion, _ *semver.Version) (bool, error) {
+ return expirableVersion.ExpirationDate != nil && (time.Now().UTC().After(expirableVersion.ExpirationDate.UTC()) || time.Now().UTC().Equal(expirableVersion.ExpirationDate.UTC())), nil
+ }
+}
+
+// GetResourceByName returns the first NamedResourceReference with the given name in the given slice, or nil if not found.
+func GetResourceByName(resources []gardencorev1beta1.NamedResourceReference, name string) *gardencorev1beta1.NamedResourceReference {
+ for _, resource := range resources {
+ if resource.Name == name {
+ return &resource
+ }
+ }
+ return nil
+}
+
+// UpsertLastError adds a 'last error' to the given list of existing 'last errors' if it does not exist yet. Otherwise,
+// it updates it.
+func UpsertLastError(lastErrors []gardencorev1beta1.LastError, lastError gardencorev1beta1.LastError) []gardencorev1beta1.LastError {
+ var (
+ out []gardencorev1beta1.LastError
+ found bool
+ )
+
+ for _, lastErr := range lastErrors {
+ if lastErr.TaskID != nil && lastError.TaskID != nil && *lastErr.TaskID == *lastError.TaskID {
+ out = append(out, lastError)
+ found = true
+ } else {
+ out = append(out, lastErr)
+ }
+ }
+
+ if !found {
+ out = append(out, lastError)
+ }
+
+ return out
+}
+
+// DeleteLastErrorByTaskID removes the 'last error' with the given task ID from the given 'last error' list.
+func DeleteLastErrorByTaskID(lastErrors []gardencorev1beta1.LastError, taskID string) []gardencorev1beta1.LastError {
+ var out []gardencorev1beta1.LastError
+ for _, lastErr := range lastErrors {
+ if lastErr.TaskID == nil || taskID != *lastErr.TaskID {
+ out = append(out, lastErr)
+ }
+ }
+ return out
+}
+
+// ShootItems provides helper functions for ShootLists.
+type ShootItems gardencorev1beta1.ShootList
+
+// Union returns the set of Shoots that are present either in s or in shootItems.
+func (s *ShootItems) Union(shootItems *ShootItems) []gardencorev1beta1.Shoot {
+ unionedShoots := make(map[string]gardencorev1beta1.Shoot)
+ for _, s := range s.Items {
+ unionedShoots[objectKey(s.Namespace, s.Name)] = s
+ }
+
+ for _, s := range shootItems.Items {
+ unionedShoots[objectKey(s.Namespace, s.Name)] = s
+ }
+
+ shoots := make([]gardencorev1beta1.Shoot, 0, len(unionedShoots))
+ for _, v := range unionedShoots {
+ shoots = append(shoots, v)
+ }
+
+ return shoots
+}
+
+func objectKey(namespace, name string) string {
+ return fmt.Sprintf("%s/%s", namespace, name)
+}
+
+// GetPurpose returns the purpose of the shoot or 'evaluation' if it's nil.
+func GetPurpose(s *gardencorev1beta1.Shoot) gardencorev1beta1.ShootPurpose {
+ if v := s.Spec.Purpose; v != nil {
+ return *v
+ }
+ return gardencorev1beta1.ShootPurposeEvaluation
+}
+
+// KubernetesDashboardEnabled returns true if the kubernetes-dashboard addon is enabled in the Shoot manifest.
+func KubernetesDashboardEnabled(addons *gardencorev1beta1.Addons) bool {
+ return addons != nil && addons.KubernetesDashboard != nil && addons.KubernetesDashboard.Enabled
+}
+
+// NginxIngressEnabled returns true if the nginx-ingress addon is enabled in the Shoot manifest.
+func NginxIngressEnabled(addons *gardencorev1beta1.Addons) bool {
+ return addons != nil && addons.NginxIngress != nil && addons.NginxIngress.Enabled
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/register.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/register.go
new file mode 100644
index 0000000..1692e76
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/register.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the name of the core API group.
+const GroupName = "core.gardener.cloud"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind.
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource.
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is a new Scheme Builder which registers our API.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs)
+ localSchemeBuilder = &SchemeBuilder
+ // AddToScheme is a reference to the Scheme Builder's AddToScheme function.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &BackupBucket{},
+ &BackupBucketList{},
+ &BackupEntry{},
+ &BackupEntryList{},
+ &CloudProfile{},
+ &CloudProfileList{},
+ &ControllerRegistration{},
+ &ControllerRegistrationList{},
+ &ControllerInstallation{},
+ &ControllerInstallationList{},
+ &Plant{},
+ &PlantList{},
+ &Project{},
+ &ProjectList{},
+ &Quota{},
+ &QuotaList{},
+ &SecretBinding{},
+ &SecretBindingList{},
+ &Seed{},
+ &SeedList{},
+ &Shoot{},
+ &ShootList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types.go
new file mode 100644
index 0000000..4548e90
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+const (
+ // GardenerSeedLeaseNamespace is the namespace in which Gardenlet will report Seeds'
+ // status using Lease resources for each Seed
+ GardenerSeedLeaseNamespace = "gardener-system-seed-lease"
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupbucket.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupbucket.go
new file mode 100644
index 0000000..8933604
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupbucket.go
@@ -0,0 +1,91 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupBucket holds details about a backup bucket.
+type BackupBucket struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the Backup Bucket.
+ Spec BackupBucketSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // Most recently observed status of the Backup Bucket.
+ Status BackupBucketStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupBucketList is a list of BackupBucket objects.
+type BackupBucketList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of BackupBucket.
+ Items []BackupBucket `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// BackupBucketSpec is the specification of a Backup Bucket.
+type BackupBucketSpec struct {
+ // Provider holds the details of the cloud provider of the object store.
+ Provider BackupBucketProvider `json:"provider" protobuf:"bytes,1,opt,name=provider"`
+ // ProviderConfig is the configuration passed to BackupBucket resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // SecretRef is a reference to a secret that contains the credentials to access object store.
+ SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
+ // SeedName holds the name of the seed allocated to BackupBucket for running controller.
+ // +optional
+ SeedName *string `json:"seedName,omitempty" protobuf:"bytes,4,opt,name=seedName"`
+}
+
+// BackupBucketStatus holds the most recently observed status of the Backup Bucket.
+type BackupBucketStatus struct {
+ // ProviderStatus is the configuration passed to BackupBucket resource.
+ // +optional
+ ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty" protobuf:"bytes,1,opt,name=providerStatus"`
+ // LastOperation holds information about the last operation on the BackupBucket.
+ // +optional
+ LastOperation *LastOperation `json:"lastOperation,omitempty" protobuf:"bytes,2,opt,name=lastOperation"`
+ // LastError holds information about the last occurred error during an operation.
+ // +optional
+ LastError *LastError `json:"lastError,omitempty" protobuf:"bytes,3,opt,name=lastError"`
+ // ObservedGeneration is the most recent generation observed for this BackupBucket. It corresponds to the
+ // BackupBucket's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,4,opt,name=observedGeneration"`
+ // GeneratedSecretRef is a reference to the secret generated by the backup bucket, which
+ // will have the object-store-specific credentials.
+ // +optional
+ GeneratedSecretRef *corev1.SecretReference `json:"generatedSecretRef,omitempty" protobuf:"bytes,5,opt,name=generatedSecretRef"`
+}
+
+// BackupBucketProvider holds the details of the cloud provider of the object store.
+type BackupBucketProvider struct {
+ // Type is the type of provider.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // Region is the region of the bucket.
+ Region string `json:"region" protobuf:"bytes,2,opt,name=region"`
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupentry.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupentry.go
new file mode 100644
index 0000000..039e03b
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupentry.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ // BackupEntryForceDeletion is a constant for an annotation on a BackupEntry indicating that it should be force deleted.
+ BackupEntryForceDeletion = "backupentry.core.gardener.cloud/force-deletion"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupEntry holds details about shoot backup.
+type BackupEntry struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec contains the specification of the Backup Entry.
+ // +optional
+ Spec BackupEntrySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Status contains the most recently observed status of the Backup Entry.
+ // +optional
+ Status BackupEntryStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupEntryList is a list of BackupEntry objects.
+type BackupEntryList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of BackupEntry.
+ Items []BackupEntry `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// BackupEntrySpec is the specification of a Backup Entry.
+type BackupEntrySpec struct {
+ // BucketName is the name of backup bucket for this Backup Entry.
+ BucketName string `json:"bucketName" protobuf:"bytes,1,opt,name=bucketName"`
+ // SeedName holds the name of the seed allocated to BackupEntry for running controller.
+ // +optional
+ SeedName *string `json:"seedName,omitempty" protobuf:"bytes,2,opt,name=seedName"`
+}
+
+// BackupEntryStatus holds the most recently observed status of the Backup Entry.
+type BackupEntryStatus struct {
+ // LastOperation holds information about the last operation on the BackupEntry.
+ // +optional
+ LastOperation *LastOperation `json:"lastOperation,omitempty" protobuf:"bytes,1,opt,name=lastOperation"`
+ // LastError holds information about the last occurred error during an operation.
+ // +optional
+ LastError *LastError `json:"lastError,omitempty" protobuf:"bytes,2,opt,name=lastError"`
+ // ObservedGeneration is the most recent generation observed for this BackupEntry. It corresponds to the
+ // BackupEntry's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go
new file mode 100644
index 0000000..fa0a422
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go
@@ -0,0 +1,226 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CloudProfile represents certain properties about a provider environment.
+type CloudProfile struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec defines the provider environment properties.
+ // +optional
+ Spec CloudProfileSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CloudProfileList is a collection of CloudProfiles.
+type CloudProfileList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of CloudProfiles.
+ Items []CloudProfile `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// CloudProfileSpec is the specification of a CloudProfile.
+// It must contain exactly one of its defined keys.
+type CloudProfileSpec struct {
+ // CABundle is a certificate bundle which will be installed onto every host machine of shoot cluster targeting this profile.
+ // +optional
+ CABundle *string `json:"caBundle,omitempty" protobuf:"bytes,1,opt,name=caBundle"`
+ // Kubernetes contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification.
+ Kubernetes KubernetesSettings `json:"kubernetes" protobuf:"bytes,2,opt,name=kubernetes"`
+ // MachineImages contains constraints regarding allowed values for machine images in the Shoot specification.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ MachineImages []MachineImage `json:"machineImages" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,3,rep,name=machineImages"`
+ // MachineTypes contains constraints regarding allowed values for machine types in the 'workers' block in the Shoot specification.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ MachineTypes []MachineType `json:"machineTypes" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,4,rep,name=machineTypes"`
+ // ProviderConfig contains provider-specific configuration for the profile.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,5,opt,name=providerConfig"`
+ // Regions contains constraints regarding allowed values for regions and zones.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Regions []Region `json:"regions" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=regions"`
+ // SeedSelector contains an optional list of labels on `Seed` resources that marks those seeds whose shoots may use this provider profile.
+ // An empty list means that all seeds of the same provider type are supported.
+ // This is useful for environments that are of the same type (like openstack) but may have different "instances"/landscapes.
+ // Optionally a list of possible providers can be added to enable cross-provider scheduling. By default, the provider
+ // type of the seed must match the shoot's provider.
+ // +optional
+ SeedSelector *SeedSelector `json:"seedSelector,omitempty" protobuf:"bytes,7,opt,name=seedSelector"`
+ // Type is the name of the provider.
+ Type string `json:"type" protobuf:"bytes,8,opt,name=type"`
+ // VolumeTypes contains constraints regarding allowed values for volume types in the 'workers' block in the Shoot specification.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ VolumeTypes []VolumeType `json:"volumeTypes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,rep,name=volumeTypes"`
+}
+
+// SeedSelector contains constraints for selecting seed to be usable for shoots using a profile
+type SeedSelector struct {
+ // LabelSelector is optional and can be used to select seeds by their label settings
+ // +optional
+ *metav1.LabelSelector `json:",inline,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
+ // ProviderTypes is optional and can be used to restrict seeds by their provider type. '*' can be used to enable seeds regardless of their provider type.
+ // +optional
+ ProviderTypes []string `json:"providerTypes,omitempty" protobuf:"bytes,2,rep,name=providerTypes"`
+}
+
+// KubernetesSettings contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification.
+type KubernetesSettings struct {
+ // Versions is the list of allowed Kubernetes versions with optional expiration dates for Shoot clusters.
+ // +patchMergeKey=version
+ // +patchStrategy=merge
+ // +optional
+ Versions []ExpirableVersion `json:"versions,omitempty" patchStrategy:"merge" patchMergeKey:"version" protobuf:"bytes,1,rep,name=versions"`
+}
+
+// MachineImage defines the name and multiple versions of the machine image in any environment.
+type MachineImage struct {
+ // Name is the name of the image.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Versions contains versions, expiration dates and container runtimes of the machine image
+ // +patchMergeKey=version
+ // +patchStrategy=merge
+ Versions []MachineImageVersion `json:"versions" patchStrategy:"merge" patchMergeKey:"version" protobuf:"bytes,2,rep,name=versions"`
+}
+
+// MachineImageVersion is an expirable version with a list of supported container runtimes and interfaces.
+type MachineImageVersion struct {
+ ExpirableVersion `json:",inline" protobuf:"bytes,1,opt,name=expirableVersion"`
+ // CRI is a list of the container runtimes and interfaces supported by this version.
+ // +optional
+ CRI []CRI `json:"cri,omitempty" protobuf:"bytes,2,rep,name=cri"`
+}
+
+// ExpirableVersion contains a version and an expiration date.
+type ExpirableVersion struct {
+ // Version is the version identifier.
+ Version string `json:"version" protobuf:"bytes,1,opt,name=version"`
+ // ExpirationDate defines the time at which this version expires.
+ // +optional
+ ExpirationDate *metav1.Time `json:"expirationDate,omitempty" protobuf:"bytes,2,opt,name=expirationDate"`
+ // Classification defines the state of a version (preview, supported, deprecated)
+ // +optional
+ Classification *VersionClassification `json:"classification,omitempty" protobuf:"bytes,3,opt,name=classification,casttype=VersionClassification"`
+}
+
+// MachineType contains certain properties of a machine type.
+type MachineType struct {
+ // CPU is the number of CPUs for this machine type.
+ CPU resource.Quantity `json:"cpu" protobuf:"bytes,1,opt,name=cpu"`
+ // GPU is the number of GPUs for this machine type.
+ GPU resource.Quantity `json:"gpu" protobuf:"bytes,2,opt,name=gpu"`
+ // Memory is the amount of memory for this machine type.
+ Memory resource.Quantity `json:"memory" protobuf:"bytes,3,opt,name=memory"`
+ // Name is the name of the machine type.
+ Name string `json:"name" protobuf:"bytes,4,opt,name=name"`
+ // Storage is the amount of storage associated with the root volume of this machine type.
+ // +optional
+ Storage *MachineTypeStorage `json:"storage,omitempty" protobuf:"bytes,5,opt,name=storage"`
+ // Usable defines if the machine type can be used for shoot clusters.
+ // +optional
+ Usable *bool `json:"usable,omitempty" protobuf:"varint,6,opt,name=usable"`
+}
+
+// MachineTypeStorage is the amount of storage associated with the root volume of this machine type.
+type MachineTypeStorage struct {
+ // Class is the class of the storage type.
+ Class string `json:"class" protobuf:"bytes,1,opt,name=class"`
+ // StorageSize is the storage size.
+ StorageSize resource.Quantity `json:"size" protobuf:"bytes,2,opt,name=size"`
+ // Type is the type of the storage.
+ Type string `json:"type" protobuf:"bytes,3,opt,name=type"`
+}
+
+// Region contains certain properties of a region.
+type Region struct {
+ // Name is a region name.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Zones is a list of availability zones in this region.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ Zones []AvailabilityZone `json:"zones,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=zones"`
+ // Labels is an optional set of key-value pairs that contain certain administrator-controlled labels for this region.
+ // It can be used by Gardener administrators/operators to provide additional information about a region, e.g. wrt
+ // quality, reliability, access restrictions, etc.
+ // +optional
+ Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,3,rep,name=labels"`
+}
+
+// AvailabilityZone is an availability zone.
+type AvailabilityZone struct {
+ // Name is an availability zone name.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // UnavailableMachineTypes is a list of machine type names that are not available in this zone.
+ // +optional
+ UnavailableMachineTypes []string `json:"unavailableMachineTypes,omitempty" protobuf:"bytes,2,rep,name=unavailableMachineTypes"`
+ // UnavailableVolumeTypes is a list of volume type names that are not available in this zone.
+ // +optional
+ UnavailableVolumeTypes []string `json:"unavailableVolumeTypes,omitempty" protobuf:"bytes,3,rep,name=unavailableVolumeTypes"`
+}
+
+// VolumeType contains certain properties of a volume type.
+type VolumeType struct {
+ // Class is the class of the volume type.
+ Class string `json:"class" protobuf:"bytes,1,opt,name=class"`
+ // Name is the name of the volume type.
+ Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+ // Usable defines if the volume type can be used for shoot clusters.
+ // +optional
+ Usable *bool `json:"usable,omitempty" protobuf:"varint,3,opt,name=usable"`
+}
+
+const (
+ // VolumeClassStandard is a constant for the standard volume class.
+ VolumeClassStandard string = "standard"
+ // VolumeClassPremium is a constant for the premium volume class.
+ VolumeClassPremium string = "premium"
+)
+
+// VersionClassification is the logical state of a version according to https://github.com/gardener/gardener/blob/master/docs/operations/versioning.md
+type VersionClassification string
+
+const (
+ // ClassificationPreview indicates that a version has recently been added and not promoted to "Supported" yet.
+ // ClassificationPreview versions will not be considered for automatic Kubernetes and Machine Image patch version updates.
+ ClassificationPreview VersionClassification = "preview"
+ // ClassificationSupported indicates that a patch version is the recommended version for a shoot.
+ // Using VersionMaintenance (see: https://github.com/gardener/gardener/docs/operation/versioning.md) there is one supported version per maintained minor version.
+ // Supported versions are eligible for the automated Kubernetes and Machine image patch version update for shoot clusters in Gardener.
+ ClassificationSupported VersionClassification = "supported"
+ // ClassificationDeprecated indicates that a patch version should not be used anymore, should be updated to a new version
+ // and will eventually expire.
+ ClassificationDeprecated VersionClassification = "deprecated"
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go
new file mode 100644
index 0000000..9122894
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go
@@ -0,0 +1,141 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// ErrorCode is a string alias.
+type ErrorCode string
+
+const (
+ // ErrorInfraUnauthorized indicates that the last error occurred due to invalid infrastructure credentials.
+ ErrorInfraUnauthorized ErrorCode = "ERR_INFRA_UNAUTHORIZED"
+ // ErrorInfraInsufficientPrivileges indicates that the last error occurred due to insufficient infrastructure privileges.
+ ErrorInfraInsufficientPrivileges ErrorCode = "ERR_INFRA_INSUFFICIENT_PRIVILEGES"
+ // ErrorInfraQuotaExceeded indicates that the last error occurred due to infrastructure quota limits.
+ ErrorInfraQuotaExceeded ErrorCode = "ERR_INFRA_QUOTA_EXCEEDED"
+ // ErrorInfraDependencies indicates that the last error occurred due to dependent objects on the infrastructure level.
+ ErrorInfraDependencies ErrorCode = "ERR_INFRA_DEPENDENCIES"
+ // ErrorInfraResourcesDepleted indicates that the last error occurred due to depleted resource in the infrastructure.
+ ErrorInfraResourcesDepleted ErrorCode = "ERR_INFRA_RESOURCES_DEPLETED"
+ // ErrorCleanupClusterResources indicates that the last error occurred due to resources in the cluster that are stuck in deletion.
+ ErrorCleanupClusterResources ErrorCode = "ERR_CLEANUP_CLUSTER_RESOURCES"
+ // ErrorConfigurationProblem indicates that the last error occurred due to a configuration problem.
+ ErrorConfigurationProblem ErrorCode = "ERR_CONFIGURATION_PROBLEM"
+)
+
+// LastError indicates the last occurred error for an operation on a resource.
+type LastError struct {
+ // A human readable message indicating details about the last error.
+ Description string `json:"description" protobuf:"bytes,1,opt,name=description"`
+ // ID of the task which caused this last error
+ // +optional
+ TaskID *string `json:"taskID,omitempty" protobuf:"bytes,2,opt,name=taskID"`
+ // Well-defined error codes of the last error(s).
+ // +optional
+ Codes []ErrorCode `json:"codes,omitempty" protobuf:"bytes,3,rep,name=codes,casttype=ErrorCode"`
+ // Last time the error was reported
+ // +optional
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"`
+}
+
+// LastOperationType is a string alias.
+type LastOperationType string
+
+const (
+ // LastOperationTypeCreate indicates a 'create' operation.
+ LastOperationTypeCreate LastOperationType = "Create"
+ // LastOperationTypeReconcile indicates a 'reconcile' operation.
+ LastOperationTypeReconcile LastOperationType = "Reconcile"
+ // LastOperationTypeDelete indicates a 'delete' operation.
+ LastOperationTypeDelete LastOperationType = "Delete"
+ // LastOperationTypeMigrate indicates a 'migrate' operation.
+ LastOperationTypeMigrate LastOperationType = "Migrate"
+ // LastOperationTypeRestore indicates a 'restore' operation.
+ LastOperationTypeRestore LastOperationType = "Restore"
+)
+
+// LastOperationState is a string alias.
+type LastOperationState string
+
+const (
+ // LastOperationStateProcessing indicates that an operation is ongoing.
+ LastOperationStateProcessing LastOperationState = "Processing"
+ // LastOperationStateSucceeded indicates that an operation has completed successfully.
+ LastOperationStateSucceeded LastOperationState = "Succeeded"
+ // LastOperationStateError indicates that an operation is completed with errors and will be retried.
+ LastOperationStateError LastOperationState = "Error"
+ // LastOperationStateFailed indicates that an operation is completed with errors and won't be retried.
+ LastOperationStateFailed LastOperationState = "Failed"
+ // LastOperationStatePending indicates that an operation cannot be done now, but will be tried in future.
+ LastOperationStatePending LastOperationState = "Pending"
+ // LastOperationStateAborted indicates that an operation has been aborted.
+ LastOperationStateAborted LastOperationState = "Aborted"
+)
+
+// LastOperation indicates the type and the state of the last operation, along with a description
+// message and a progress indicator.
+type LastOperation struct {
+ // A human readable message indicating details about the last operation.
+ Description string `json:"description" protobuf:"bytes,1,opt,name=description"`
+ // Last time the operation state transitioned from one to another.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime" protobuf:"bytes,2,opt,name=lastUpdateTime"`
+ // The progress in percentage (0-100) of the last operation.
+ Progress int32 `json:"progress" protobuf:"varint,3,opt,name=progress"`
+ // Status of the last operation, one of Aborted, Processing, Succeeded, Error, Failed.
+ State LastOperationState `json:"state" protobuf:"bytes,4,opt,name=state,casttype=LastOperationState"`
+ // Type of the last operation, one of Create, Reconcile, Delete.
+ Type LastOperationType `json:"type" protobuf:"bytes,5,opt,name=type,casttype=LastOperationType"`
+}
+
+// Gardener holds the information about the Gardener version that operated a resource.
+type Gardener struct {
+ // ID is the Docker container id of the Gardener which last acted on a resource.
+ ID string `json:"id" protobuf:"bytes,1,opt,name=id"`
+ // Name is the hostname (pod name) of the Gardener which last acted on a resource.
+ Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+ // Version is the version of the Gardener which last acted on a resource.
+ Version string `json:"version" protobuf:"bytes,3,opt,name=version"`
+}
+
+const (
+ // GardenerName is the value in a Garden resource's `.metadata.finalizers[]` array on which the Gardener will react
+ // when performing a delete request on a resource.
+ GardenerName = "gardener"
+ // ExternalGardenerName is the value in a Kubernetes core resources `.metadata.finalizers[]` array on which the
+ // Gardener will react when performing a delete request on a resource.
+ ExternalGardenerName = "gardener.cloud/gardener"
+)
+
+const (
+ // EventReconciling indicates that a Reconcile operation started.
+ EventReconciling = "Reconciling"
+ // EventReconciled indicates that a Reconcile operation was successful.
+ EventReconciled = "Reconciled"
+ // EventReconcileError indicates that a Reconcile operation failed.
+ EventReconcileError = "ReconcileError"
+ // EventDeleting indicates that a Delete operation started.
+ EventDeleting = "Deleting"
+ // EventDeleted indicates that a Delete operation was successful.
+ EventDeleted = "Deleted"
+ // EventDeleteError indicates that a Delete operation failed.
+ EventDeleteError = "DeleteError"
+ // EventPrepareMigration indicates that a Prepare Migration operation started.
+ EventPrepareMigration = "PrepareMigration"
+ // EventMigrationPrepared indicates that Migration preparation was successful.
+ EventMigrationPrepared = "MigrationPrepared"
+ // EventMigrationPreparationFailed indicates that Migration preparation failed.
+ EventMigrationPreparationFailed = "MigrationPreparationFailed"
+)
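
To make the semantics of the status types above concrete, here is a minimal, illustrative Go sketch of a controller recording a finished reconciliation and branching on the resulting state; the package alias gardencorev1beta1 and all literal values are assumptions of this sketch, not part of the vendored file.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Record that a Reconcile operation has completed successfully.
	lastOp := gardencorev1beta1.LastOperation{
		Description:    "Reconciliation finished",
		LastUpdateTime: metav1.Now(),
		Progress:       100,
		State:          gardencorev1beta1.LastOperationStateSucceeded,
		Type:           gardencorev1beta1.LastOperationTypeReconcile,
	}

	// Callers typically branch on the state to decide whether a retry is expected.
	switch lastOp.State {
	case gardencorev1beta1.LastOperationStateError:
		fmt.Println("operation failed, will be retried")
	case gardencorev1beta1.LastOperationStateFailed:
		fmt.Println("operation failed, will not be retried")
	default:
		fmt.Printf("%s/%s: %s (%d%%)\n", lastOp.Type, lastOp.State, lastOp.Description, lastOp.Progress)
	}
}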
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerinstallation.go
new file mode 100644
index 0000000..f2b2024
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerinstallation.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerInstallation represents an installation request for an external controller.
+type ControllerInstallation struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec contains the specification of this installation.
+ Spec ControllerInstallationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Status contains the status of this installation.
+ Status ControllerInstallationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerInstallationList is a collection of ControllerInstallations.
+type ControllerInstallationList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of ControllerInstallations.
+ Items []ControllerInstallation `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ControllerInstallationSpec is the specification of a ControllerInstallation.
+type ControllerInstallationSpec struct {
+ // RegistrationRef is used to reference a ControllerRegistration resource.
+ RegistrationRef corev1.ObjectReference `json:"registrationRef" protobuf:"bytes,1,opt,name=registrationRef"`
+ // SeedRef is used to reference a Seed resource.
+ SeedRef corev1.ObjectReference `json:"seedRef" protobuf:"bytes,2,opt,name=seedRef"`
+}
+
+// ControllerInstallationStatus is the status of a ControllerInstallation.
+type ControllerInstallationStatus struct {
+ // Conditions represents the latest available observations of a ControllerInstallation's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+ // ProviderStatus contains type-specific status.
+ // +optional
+ ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty" protobuf:"bytes,2,opt,name=providerStatus"`
+}
+
+const (
+ // ControllerInstallationHealthy is a condition type for indicating whether the controller is healthy.
+ ControllerInstallationHealthy ConditionType = "Healthy"
+ // ControllerInstallationInstalled is a condition type for indicating whether the controller has been installed.
+ ControllerInstallationInstalled ConditionType = "Installed"
+ // ControllerInstallationValid is a condition type for indicating whether the installation request is valid.
+ ControllerInstallationValid ConditionType = "Valid"
+ // ControllerInstallationRequired is a condition type for indicating that the respective extension controller is
+ // still required on the seed cluster as corresponding extension resources still exist.
+ ControllerInstallationRequired ConditionType = "Required"
+)
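
As a quick orientation for the resource defined above: a ControllerInstallation simply ties a ControllerRegistration to a Seed. A minimal, hypothetical construction in Go follows; the package alias and all object names are assumptions.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	installation := gardencorev1beta1.ControllerInstallation{
		ObjectMeta: metav1.ObjectMeta{Name: "fleet-agent-on-seed-aws"},
		Spec: gardencorev1beta1.ControllerInstallationSpec{
			// Which registered controller to install ...
			RegistrationRef: corev1.ObjectReference{Name: "fleet-agent"},
			// ... and on which seed cluster to install it.
			SeedRef: corev1.ObjectReference{Name: "seed-aws"},
		},
	}
	fmt.Println(installation.Name)
}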
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go
new file mode 100644
index 0000000..c25970f
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRegistration represents a registration of an external controller.
+type ControllerRegistration struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec contains the specification of this registration.
+ Spec ControllerRegistrationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRegistrationList is a collection of ControllerRegistrations.
+type ControllerRegistrationList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of ControllerRegistrations.
+ Items []ControllerRegistration `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ControllerRegistrationSpec is the specification of a ControllerRegistration.
+type ControllerRegistrationSpec struct {
+ // Resources is a list of combinations of kinds (DNSProvider, Infrastructure, Generic, ...) and their actual types
+ // (aws-route53, gcp, auditlog, ...).
+ // +optional
+ Resources []ControllerResource `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"`
+ // Deployment contains information for how this controller is deployed.
+ // +optional
+ Deployment *ControllerDeployment `json:"deployment,omitempty" protobuf:"bytes,2,opt,name=deployment"`
+}
+
+// ControllerResource is a combination of a kind (DNSProvider, Infrastructure, Generic, ...) and the actual type for this
+// kind (aws-route53, gcp, auditlog, ...).
+type ControllerResource struct {
+ // Kind is the resource kind, for example "OperatingSystemConfig".
+ Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+ // Type is the resource type, for example "coreos" or "ubuntu".
+ Type string `json:"type" protobuf:"bytes,2,opt,name=type"`
+ // GloballyEnabled determines if this ControllerResource is required by all Shoot clusters.
+ // +optional
+ GloballyEnabled *bool `json:"globallyEnabled,omitempty" protobuf:"varint,3,opt,name=globallyEnabled"`
+ // ReconcileTimeout defines how long Gardener should wait for the resource reconciliation.
+ // +optional
+ ReconcileTimeout *metav1.Duration `json:"reconcileTimeout,omitempty" protobuf:"bytes,4,opt,name=reconcileTimeout"`
+ // Primary determines if the controller backed by this ControllerRegistration is responsible for the extension
+ // resource's lifecycle. This field defaults to true. There must be exactly one primary controller for this kind/type
+ // combination.
+ // +optional
+ Primary *bool `json:"primary,omitempty" protobuf:"varint,5,opt,name=primary"`
+}
+
+// ControllerDeployment contains information for how this controller is deployed.
+type ControllerDeployment struct {
+ // Type is the deployment type.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // ProviderConfig contains type-specific configuration.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // Policy controls how the controller is deployed. It defaults to 'OnDemand'.
+ // +optional
+ Policy *ControllerDeploymentPolicy `json:"policy,omitempty" protobuf:"bytes,3,opt,name=policy"`
+ // SeedSelector contains an optional label selector for seeds. Only if the labels match then this controller will be
+ // considered for a deployment.
+ // An empty list means that all seeds are selected.
+ // +optional
+ SeedSelector *metav1.LabelSelector `json:"seedSelector,omitempty" protobuf:"bytes,4,opt,name=seedSelector"`
+}
+
+// ControllerDeploymentPolicy is a string alias.
+type ControllerDeploymentPolicy string
+
+const (
+ // ControllerDeploymentPolicyOnDemand specifies that the controller shall be only deployed if required by another
+ // resource. If nothing requires it then the controller shall not be deployed.
+ ControllerDeploymentPolicyOnDemand ControllerDeploymentPolicy = "OnDemand"
+ // ControllerDeploymentPolicyAlways specifies that the controller shall be deployed always, independent of whether
+ // another resource requires it or the respective seed has shoots.
+ ControllerDeploymentPolicyAlways ControllerDeploymentPolicy = "Always"
+ // ControllerDeploymentPolicyAlwaysExceptNoShoots specifies that the controller shall be deployed always, independent of
+ // whether another resource requires it, but only when the respective seed has at least one shoot.
+ ControllerDeploymentPolicyAlwaysExceptNoShoots ControllerDeploymentPolicy = "AlwaysExceptNoShoots"
+)
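
For illustration only, a hypothetical ControllerRegistration built from the types above, declaring one kind/type combination and an 'Always' deployment policy; the names and the "helm" deployment type are assumptions of this sketch.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	always := gardencorev1beta1.ControllerDeploymentPolicyAlways
	registration := gardencorev1beta1.ControllerRegistration{
		ObjectMeta: metav1.ObjectMeta{Name: "fleet-agent"},
		Spec: gardencorev1beta1.ControllerRegistrationSpec{
			// The kind/type combination this controller is responsible for.
			Resources: []gardencorev1beta1.ControllerResource{
				{Kind: "Extension", Type: "fleet-agent"},
			},
			// Deploy the controller regardless of whether a shoot currently requires it.
			Deployment: &gardencorev1beta1.ControllerDeployment{Type: "helm", Policy: &always},
		},
	}
	fmt.Println(registration.Name, *registration.Spec.Deployment.Policy)
}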
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_plant.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_plant.go
new file mode 100644
index 0000000..5177be6
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_plant.go
@@ -0,0 +1,112 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Plant represents an external Kubernetes cluster that is registered with Gardener.
+type Plant struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec contains the specification of this Plant.
+ Spec PlantSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Status contains the status of this Plant.
+ Status PlantStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PlantList is a collection of Plants.
+type PlantList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of Plants.
+ Items []Plant `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+const (
+ // PlantEveryNodeReady is a constant for a condition type indicating the node health.
+ PlantEveryNodeReady ConditionType = "EveryNodeReady"
+ // PlantAPIServerAvailable is a constant for a condition type indicating that the Plant cluster API server is available.
+ PlantAPIServerAvailable ConditionType = "APIServerAvailable"
+)
+
+// PlantSpec is the specification of a Plant.
+type PlantSpec struct {
+ // SecretRef is a reference to a Secret object containing the Kubeconfig of the external Kubernetes
+ // cluster to be added to Gardener.
+ SecretRef corev1.LocalObjectReference `json:"secretRef" protobuf:"bytes,1,opt,name=secretRef"`
+ // Endpoints is the configuration of the plant endpoints.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ Endpoints []Endpoint `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=endpoints"`
+}
+
+// PlantStatus is the status of a Plant.
+type PlantStatus struct {
+ // Conditions represents the latest available observations of a Plant's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+ // ObservedGeneration is the most recent generation observed for this Plant. It corresponds to the
+ // Plant's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,2,opt,name=observedGeneration"`
+ // ClusterInfo is additional computed information about the newly added cluster (Plant)
+ ClusterInfo *ClusterInfo `json:"clusterInfo,omitempty" protobuf:"bytes,3,opt,name=clusterInfo"`
+}
+
+// Endpoint is an endpoint for monitoring, logging and other services around the plant.
+type Endpoint struct {
+ // Name is the name of the endpoint
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // URL is the url of the endpoint
+ URL string `json:"url" protobuf:"bytes,2,opt,name=url"`
+ // Purpose is the purpose of the endpoint
+ Purpose string `json:"purpose" protobuf:"bytes,3,opt,name=purpose"`
+}
+
+// ClusterInfo contains information about the Plant cluster
+type ClusterInfo struct {
+ // Cloud describes the cloud information
+ Cloud CloudInfo `json:"cloud" protobuf:"bytes,1,opt,name=cloud"`
+ // Kubernetes describes kubernetes meta information (e.g., version)
+ Kubernetes KubernetesInfo `json:"kubernetes" protobuf:"bytes,2,opt,name=kubernetes"`
+}
+
+// CloudInfo contains information about the cloud
+type CloudInfo struct {
+ // Type is the cloud type
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // Region is the cloud region
+ Region string `json:"region" protobuf:"bytes,2,opt,name=region"`
+}
+
+// KubernetesInfo contains the version and configuration variables for the Plant cluster.
+type KubernetesInfo struct {
+ // Version is the semantic Kubernetes version to use for the Plant cluster.
+ Version string `json:"version" protobuf:"bytes,1,opt,name=version"`
+}
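
A Plant is how an already existing external cluster is registered via a kubeconfig secret. A minimal, illustrative construction follows; all names and URLs are assumptions of this sketch.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	plant := gardencorev1beta1.Plant{
		ObjectMeta: metav1.ObjectMeta{Name: "external-cluster", Namespace: "garden-dev"},
		Spec: gardencorev1beta1.PlantSpec{
			// Secret holding the kubeconfig of the external cluster.
			SecretRef: corev1.LocalObjectReference{Name: "external-cluster-kubeconfig"},
			// Optional named endpoints (monitoring, logging, ...) shown alongside the cluster.
			Endpoints: []gardencorev1beta1.Endpoint{
				{Name: "grafana", URL: "https://grafana.example.org", Purpose: "monitoring"},
			},
		},
	}
	fmt.Println(plant.Name)
}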
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_project.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_project.go
new file mode 100644
index 0000000..74a6e41
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_project.go
@@ -0,0 +1,174 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Project holds certain properties about a Gardener project.
+type Project struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec defines the project properties.
+ // +optional
+ Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Most recently observed status of the Project.
+ // +optional
+ Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ProjectList is a collection of Projects.
+type ProjectList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of Projects.
+ Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ProjectSpec is the specification of a Project.
+type ProjectSpec struct {
+ // CreatedBy is a subject representing a user name, an email address, or any other identifier of a user
+ // who created the project.
+ // +optional
+ CreatedBy *rbacv1.Subject `json:"createdBy,omitempty" protobuf:"bytes,1,opt,name=createdBy"`
+ // Description is a human-readable description of what the project is used for.
+ // +optional
+ Description *string `json:"description,omitempty" protobuf:"bytes,2,opt,name=description"`
+ // Owner is a subject representing a user name, an email address, or any other identifier of a user owning
+ // the project.
+ // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `owner`
+ // role. The only way to change the owner will be by moving the `owner` role. In this API version the only way
+ // to change the owner is to use this field.
+ // +optional
+ // TODO: Remove this field in favor of the `owner` role in `v1`.
+ Owner *rbacv1.Subject `json:"owner,omitempty" protobuf:"bytes,3,opt,name=owner"`
+ // Purpose is a human-readable explanation of the project's purpose.
+ // +optional
+ Purpose *string `json:"purpose,omitempty" protobuf:"bytes,4,opt,name=purpose"`
+ // Members is a list of subjects representing a user name, an email address, or any other identifier of a user,
+ // group, or service account that has a certain role.
+ // +optional
+ Members []ProjectMember `json:"members,omitempty" protobuf:"bytes,5,rep,name=members"`
+ // Namespace is the name of the namespace that has been created for the Project object.
+ // A nil value means that Gardener will determine the name of the namespace.
+ // +optional
+ Namespace *string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"`
+ // Tolerations contains the tolerations for taints on seed clusters.
+ // +optional
+ Tolerations *ProjectTolerations `json:"tolerations,omitempty" protobuf:"bytes,7,opt,name=tolerations"`
+}
+
+// ProjectStatus holds the most recently observed status of the project.
+type ProjectStatus struct {
+ // ObservedGeneration is the most recent generation observed for this project.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+ // Phase is the current phase of the project.
+ Phase ProjectPhase `json:"phase,omitempty" protobuf:"bytes,2,opt,name=phase,casttype=ProjectPhase"`
+ // StaleSinceTimestamp contains the timestamp when the project was first discovered to be stale/unused.
+ // +optional
+ StaleSinceTimestamp *metav1.Time `json:"staleSinceTimestamp,omitempty" protobuf:"bytes,3,opt,name=staleSinceTimestamp"`
+ // StaleAutoDeleteTimestamp contains the timestamp when the project will be garbage-collected/automatically deleted
+ // because it's stale/unused.
+ // +optional
+ StaleAutoDeleteTimestamp *metav1.Time `json:"staleAutoDeleteTimestamp,omitempty" protobuf:"bytes,4,opt,name=staleAutoDeleteTimestamp"`
+}
+
+// ProjectMember is a member of a project.
+type ProjectMember struct {
+ // Subject is representing a user name, an email address, or any other identifier of a user, group, or service
+ // account that has a certain role.
+ rbacv1.Subject `json:",inline" protobuf:"bytes,1,opt,name=subject"`
+ // Role represents the role of this member.
+ // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `roles`
+ // list.
+ // TODO: Remove this field in favor of the `roles` list in `v1`.
+ Role string `json:"role" protobuf:"bytes,2,opt,name=role"`
+ // Roles represents the list of roles of this member.
+ // +optional
+ Roles []string `json:"roles,omitempty" protobuf:"bytes,3,rep,name=roles"`
+}
+
+// ProjectTolerations contains the tolerations for taints on seed clusters.
+type ProjectTolerations struct {
+ // Defaults contains a list of tolerations that are added to the shoots in this project by default.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ Defaults []Toleration `json:"defaults,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,rep,name=defaults"`
+ // Whitelist contains a list of tolerations that are allowed to be added to the shoots in this project. Please note
+ // that this list may only be added by users having the `spec-tolerations-whitelist` verb for project resources.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ Whitelist []Toleration `json:"whitelist,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,2,rep,name=whitelist"`
+}
+
+// Toleration is a toleration for a seed taint.
+type Toleration struct {
+ // Key is the toleration key to be applied to a project or shoot.
+ Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+ // Value is the toleration value corresponding to the toleration key.
+ // +optional
+ Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+}
+
+const (
+ // ProjectMemberAdmin is a const for a role that provides full admin access.
+ ProjectMemberAdmin = "admin"
+ // ProjectMemberOwner is a const for a role that provides full owner access.
+ ProjectMemberOwner = "owner"
+ // ProjectMemberUserAccessManager is a const for a role that provides permissions to manage human users and groups.
+ ProjectMemberUserAccessManager = "uam"
+ // ProjectMemberViewer is a const for a role that provides limited permissions to only view some resources.
+ ProjectMemberViewer = "viewer"
+ // ProjectMemberExtensionPrefix is a prefix for custom roles that are not known by Gardener.
+ ProjectMemberExtensionPrefix = "extension:"
+)
+
+// ProjectPhase is a label for the condition of a project at the current time.
+type ProjectPhase string
+
+const (
+ // ProjectPending indicates that the project reconciliation is pending.
+ ProjectPending ProjectPhase = "Pending"
+ // ProjectReady indicates that the project reconciliation was successful.
+ ProjectReady ProjectPhase = "Ready"
+ // ProjectFailed indicates that the project reconciliation failed.
+ ProjectFailed ProjectPhase = "Failed"
+ // ProjectTerminating indicates that the project is in termination process.
+ ProjectTerminating ProjectPhase = "Terminating"
+
+ // ProjectEventNamespaceReconcileFailed indicates that the namespace reconciliation has failed.
+ ProjectEventNamespaceReconcileFailed = "NamespaceReconcileFailed"
+ // ProjectEventNamespaceReconcileSuccessful indicates that the namespace reconciliation has succeeded.
+ ProjectEventNamespaceReconcileSuccessful = "NamespaceReconcileSuccessful"
+ // ProjectEventNamespaceDeletionFailed indicates that the namespace deletion failed.
+ ProjectEventNamespaceDeletionFailed = "NamespaceDeletionFailed"
+ // ProjectEventNamespaceMarkedForDeletion indicates that the namespace has been successfully marked for deletion.
+ ProjectEventNamespaceMarkedForDeletion = "NamespaceMarkedForDeletion"
+)
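
To illustrate the member model above, here is a hypothetical Project with one member carrying both the deprecated Role field and the newer Roles list; the package alias, project name, and user identifier are assumptions of this sketch.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	description := "Example project"
	project := gardencorev1beta1.Project{
		ObjectMeta: metav1.ObjectMeta{Name: "dev"},
		Spec: gardencorev1beta1.ProjectSpec{
			Description: &description,
			Members: []gardencorev1beta1.ProjectMember{{
				// A member is identified by an rbac/v1 Subject ...
				Subject: rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.UserKind, Name: "alice@example.org"},
				// ... and carries the (deprecated) single role plus an optional list of further roles.
				Role:  gardencorev1beta1.ProjectMemberAdmin,
				Roles: []string{gardencorev1beta1.ProjectMemberViewer},
			}},
		},
	}
	fmt.Println(project.Name, len(project.Spec.Members))
}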
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_quota.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_quota.go
new file mode 100644
index 0000000..9803cb9
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_quota.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Quota represents a quota on resources consumed by shoot clusters, either per project or per provider secret.
+type Quota struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec defines the Quota constraints.
+ // +optional
+ Spec QuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// QuotaList is a collection of Quotas.
+type QuotaList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of Quotas.
+ Items []Quota `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// QuotaSpec is the specification of a Quota.
+type QuotaSpec struct {
+ // ClusterLifetimeDays is the lifetime of a Shoot cluster in days before it will be terminated automatically.
+ // +optional
+ ClusterLifetimeDays *int32 `json:"clusterLifetimeDays,omitempty" protobuf:"varint,1,opt,name=clusterLifetimeDays"`
+ // Metrics is a list of resources which will be put under constraints.
+ Metrics corev1.ResourceList `json:"metrics" protobuf:"bytes,2,rep,name=metrics,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName"`
+ // Scope is the scope of the Quota object, either 'project' or 'secret'.
+ Scope corev1.ObjectReference `json:"scope" protobuf:"bytes,3,opt,name=scope"`
+}
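
An illustrative Quota limiting cluster lifetime and one resource metric; the scope reference values and the choice of a CPU metric are assumptions of this sketch, not prescribed by the type definition.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	lifetime := int32(14)
	quota := gardencorev1beta1.Quota{
		ObjectMeta: metav1.ObjectMeta{Name: "trial-quota", Namespace: "garden-dev"},
		Spec: gardencorev1beta1.QuotaSpec{
			// Shoots charged against this quota are terminated automatically after two weeks.
			ClusterLifetimeDays: &lifetime,
			// Put the listed resources under constraints.
			Metrics: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100")},
			// Scope the quota to a project (a 'secret' scope would reference a Secret instead).
			Scope: corev1.ObjectReference{APIVersion: "core.gardener.cloud/v1beta1", Kind: "Project", Name: "dev"},
		},
	}
	fmt.Println(quota.Name)
}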
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_secretbinding.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_secretbinding.go
new file mode 100644
index 0000000..5aa8bb2
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_secretbinding.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SecretBinding represents a binding to a secret in the same or another namespace.
+type SecretBinding struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // SecretRef is a reference to a secret object in the same or another namespace.
+ SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,2,opt,name=secretRef"`
+ // Quotas is a list of references to Quota objects in the same or another namespace.
+ // +optional
+ Quotas []corev1.ObjectReference `json:"quotas,omitempty" protobuf:"bytes,3,rep,name=quotas"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SecretBindingList is a collection of SecretBindings.
+type SecretBindingList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of SecretBindings.
+ Items []SecretBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
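
A short, hypothetical sketch showing how a SecretBinding ties provider credentials to optional Quotas; all names are assumptions.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	binding := gardencorev1beta1.SecretBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "provider-account", Namespace: "garden-dev"},
		// The referenced secret may live in the same or another namespace.
		SecretRef: corev1.SecretReference{Name: "provider-credentials", Namespace: "garden-dev"},
		// Optionally attach Quota objects that constrain usage of these credentials.
		Quotas: []corev1.ObjectReference{{Name: "trial-quota", Namespace: "garden-dev"}},
	}
	fmt.Println(binding.Name)
}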
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go
new file mode 100644
index 0000000..fe4af0d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go
@@ -0,0 +1,320 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Seed represents a seed cluster that hosts the control planes of Shoot clusters.
+type Seed struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec contains the specification of this seed.
+ Spec SeedSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Status contains the status of this seed.
+ Status SeedStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SeedList is a collection of Seeds.
+type SeedList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of Seeds.
+ Items []Seed `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// SeedSpec is the specification of a Seed.
+type SeedSpec struct {
+ // Backup holds the object store configuration for the backups of shoot (currently only etcd).
+ // If it is not specified, then there won't be any backups taken for shoots associated with this seed.
+ // If backup field is present in seed, then backups of the etcd from shoot control plane will be stored
+ // under the configured object store.
+ // +optional
+ Backup *SeedBackup `json:"backup,omitempty" protobuf:"bytes,1,opt,name=backup"`
+ // DNS contains DNS-relevant information about this seed cluster.
+ DNS SeedDNS `json:"dns" protobuf:"bytes,2,opt,name=dns"`
+ // Networks defines the pod, service and worker network of the Seed cluster.
+ Networks SeedNetworks `json:"networks" protobuf:"bytes,3,opt,name=networks"`
+ // Provider defines the provider type and region for this Seed cluster.
+ Provider SeedProvider `json:"provider" protobuf:"bytes,4,opt,name=provider"`
+ // SecretRef is a reference to a Secret object containing the Kubeconfig and the cloud provider credentials for
+ // the account the Seed cluster has been deployed to.
+ // +optional
+ SecretRef *corev1.SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
+ // Taints describes taints on the seed.
+ // +optional
+ Taints []SeedTaint `json:"taints,omitempty" protobuf:"bytes,6,rep,name=taints"`
+ // Volume contains settings for persistentvolumes created in the seed cluster.
+ // +optional
+ Volume *SeedVolume `json:"volume,omitempty" protobuf:"bytes,7,opt,name=volume"`
+ // Settings contains certain settings for this seed cluster.
+ // +optional
+ Settings *SeedSettings `json:"settings,omitempty" protobuf:"bytes,8,opt,name=settings"`
+ // Ingress configures Ingress specific settings of the Seed cluster.
+ // +optional
+ Ingress *Ingress `json:"ingress,omitempty" protobuf:"bytes,9,opt,name=ingress"`
+}
+
+// SeedStatus is the status of a Seed.
+type SeedStatus struct {
+ // Gardener holds information about the Gardener which last acted on the Shoot.
+ // +optional
+ Gardener *Gardener `json:"gardener,omitempty" protobuf:"bytes,1,opt,name=gardener"`
+ // KubernetesVersion is the Kubernetes version of the seed cluster.
+ // +optional
+ KubernetesVersion *string `json:"kubernetesVersion,omitempty" protobuf:"bytes,2,opt,name=kubernetesVersion"`
+ // Conditions represents the latest available observations of a Seed's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,3,rep,name=conditions"`
+ // ObservedGeneration is the most recent generation observed for this Seed. It corresponds to the
+ // Seed's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,4,opt,name=observedGeneration"`
+ // ClusterIdentity is the identity of the Seed cluster
+ // +optional
+ ClusterIdentity *string `json:"clusterIdentity,omitempty" protobuf:"bytes,5,opt,name=clusterIdentity"`
+ // Capacity represents the total resources of a seed.
+ // +optional
+ Capacity corev1.ResourceList `json:"capacity,omitempty" protobuf:"bytes,6,rep,name=capacity"`
+ // Allocatable represents the resources of a seed that are available for scheduling.
+ // Defaults to Capacity.
+ // +optional
+ Allocatable corev1.ResourceList `json:"allocatable,omitempty" protobuf:"bytes,7,rep,name=allocatable"`
+}
+
+// SeedBackup contains the object store configuration for backups for shoot (currently only etcd).
+type SeedBackup struct {
+ // Provider is a provider name.
+ Provider string `json:"provider" protobuf:"bytes,1,opt,name=provider"`
+ // ProviderConfig is the configuration passed to BackupBucket resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // Region is a region name.
+ // +optional
+ Region *string `json:"region,omitempty" protobuf:"bytes,3,opt,name=region"`
+ // SecretRef is a reference to a Secret object containing the cloud provider credentials for
+ // the object store where backups should be stored. It should have enough privileges to manipulate
+ // the objects as well as buckets.
+ SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,4,opt,name=secretRef"`
+}
+
+// SeedDNS contains DNS-relevant information about this seed cluster.
+type SeedDNS struct {
+ // IngressDomain is the domain of the Seed cluster pointing to the ingress controller endpoint. It will be used
+ // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable.
+ // This will be removed in the next API version and replaced by spec.ingress.domain.
+ // +optional
+ IngressDomain *string `json:"ingressDomain,omitempty" protobuf:"bytes,1,opt,name=ingressDomain"`
+ // Provider configures a DNSProvider
+ // +optional
+ Provider *SeedDNSProvider `json:"provider,omitempty" protobuf:"bytes,2,opt,name=provider"`
+}
+
+// SeedDNSProvider configures a DNSProvider for Seeds
+type SeedDNSProvider struct {
+ // Type describes the type of the dns-provider, for example `aws-route53`
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // SecretRef is a reference to a Secret object containing cloud provider credentials used for registering external domains.
+ SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,2,opt,name=secretRef"`
+ // Domains contains information about which domains shall be included/excluded for this provider.
+ // +optional
+ Domains *DNSIncludeExclude `json:"domains,omitempty" protobuf:"bytes,3,opt,name=domains"`
+ // Zones contains information about which hosted zones shall be included/excluded for this provider.
+ // +optional
+ Zones *DNSIncludeExclude `json:"zones,omitempty" protobuf:"bytes,4,opt,name=zones"`
+}
+
+// Ingress configures the Ingress specific settings of the Seed cluster
+type Ingress struct {
+ // Domain specifies the IngressDomain of the Seed cluster pointing to the ingress controller endpoint. It will be used
+ // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable.
+ Domain string `json:"domain" protobuf:"bytes,1,opt,name=domain"`
+ // Controller configures a Gardener managed Ingress Controller listening on the ingressDomain
+ Controller IngressController `json:"controller" protobuf:"bytes,2,opt,name=controller"`
+}
+
+// IngressController enables a Gardener managed Ingress Controller listening on the ingressDomain
+type IngressController struct {
+ // Kind defines which kind of IngressController to use, for example `nginx`
+ Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+ // ProviderConfig specifies infrastructure specific configuration for the ingressController
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+}
+
+// SeedNetworks contains CIDRs for the pod, service and node networks of a Kubernetes cluster.
+type SeedNetworks struct {
+ // Nodes is the CIDR of the node network.
+ // +optional
+ Nodes *string `json:"nodes,omitempty" protobuf:"bytes,1,opt,name=nodes"`
+ // Pods is the CIDR of the pod network.
+ Pods string `json:"pods" protobuf:"bytes,2,opt,name=pods"`
+ // Services is the CIDR of the service network.
+ Services string `json:"services" protobuf:"bytes,3,opt,name=services"`
+ // ShootDefaults contains the default networks CIDRs for shoots.
+ // +optional
+ ShootDefaults *ShootNetworks `json:"shootDefaults,omitempty" protobuf:"bytes,4,opt,name=shootDefaults"`
+ // BlockCIDRs is a list of network addresses that should be blocked for shoot control plane components running
+ // in the seed cluster.
+ // +optional
+ BlockCIDRs []string `json:"blockCIDRs,omitempty" protobuf:"bytes,5,rep,name=blockCIDRs"`
+}
+
+// ShootNetworks contains the default networks CIDRs for shoots.
+type ShootNetworks struct {
+ // Pods is the CIDR of the pod network.
+ // +optional
+ Pods *string `json:"pods,omitempty" protobuf:"bytes,1,opt,name=pods"`
+ // Services is the CIDR of the service network.
+ // +optional
+ Services *string `json:"services,omitempty" protobuf:"bytes,2,opt,name=services"`
+}
+
+// SeedProvider defines the provider type and region for this Seed cluster.
+type SeedProvider struct {
+ // Type is the name of the provider.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // ProviderConfig is the configuration passed to Seed resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // Region is a name of a region.
+ Region string `json:"region" protobuf:"bytes,3,opt,name=region"`
+}
+
+// SeedSettings contains certain settings for this seed cluster.
+type SeedSettings struct {
+ // ExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the seed.
+ // +optional
+ ExcessCapacityReservation *SeedSettingExcessCapacityReservation `json:"excessCapacityReservation,omitempty" protobuf:"bytes,1,opt,name=excessCapacityReservation"`
+ // Scheduling controls settings for scheduling decisions for the seed.
+ // +optional
+ Scheduling *SeedSettingScheduling `json:"scheduling,omitempty" protobuf:"bytes,2,opt,name=scheduling"`
+ // ShootDNS controls the shoot DNS settings for the seed.
+ // +optional
+ ShootDNS *SeedSettingShootDNS `json:"shootDNS,omitempty" protobuf:"bytes,3,opt,name=shootDNS"`
+ // LoadBalancerServices controls certain settings for services of type load balancer that are created in the
+ // seed.
+ // +optional
+ LoadBalancerServices *SeedSettingLoadBalancerServices `json:"loadBalancerServices,omitempty" protobuf:"bytes,4,opt,name=loadBalancerServices"`
+ // VerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the seed.
+ // +optional
+ VerticalPodAutoscaler *SeedSettingVerticalPodAutoscaler `json:"verticalPodAutoscaler,omitempty" protobuf:"bytes,5,opt,name=verticalPodAutoscaler"`
+}
+
+// SeedSettingExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the
+// seed. When enabled, this is done via PodPriority and requires the Seed cluster to run Kubernetes 1.11 or newer,
+// or to have the PodPriority feature gate as well as the scheduling.k8s.io/v1alpha1 API group enabled.
+type SeedSettingExcessCapacityReservation struct {
+ // Enabled controls whether the excess capacity reservation should be enabled.
+ Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"`
+}
+
+// SeedSettingShootDNS controls the shoot DNS settings for the seed.
+type SeedSettingShootDNS struct {
+ // Enabled controls whether the DNS for shoot clusters should be enabled. When disabled, shoots using the seed
+ // won't get any DNS providers or DNS records, and no DNS extension controller needs to be installed here.
+ // This is useful for environments where DNS is not required.
+ Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"`
+}
+
+// SeedSettingScheduling controls settings for scheduling decisions for the seed.
+type SeedSettingScheduling struct {
+ // Visible controls whether the gardener-scheduler shall consider this seed when scheduling shoots. Invisible seeds
+ // are not considered by the scheduler.
+ Visible bool `json:"visible" protobuf:"bytes,1,opt,name=visible"`
+}
+
+// SeedSettingLoadBalancerServices controls certain settings for services of type load balancer that are created in the
+// seed.
+type SeedSettingLoadBalancerServices struct {
+ // Annotations is a map of annotations that will be injected/merged into every load balancer service object.
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,1,rep,name=annotations"`
+}
+
+// SeedSettingVerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the
+// seed.
+type SeedSettingVerticalPodAutoscaler struct {
+ // Enabled controls whether the VPA components shall be deployed into the garden namespace in the seed cluster. It
+ // is enabled by default because Gardener heavily relies on a VPA being deployed. You should only disable this if
+ // your seed cluster already has another, manually/custom managed VPA deployment.
+ Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"`
+}
+
+// SeedTaint describes a taint on a seed.
+type SeedTaint struct {
+ // Key is the taint key to be applied to a seed.
+ Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+ // Value is the taint value corresponding to the taint key.
+ // +optional
+ Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+}
+
+const (
+ // SeedTaintProtected is a constant for a taint key on a seed that marks it as protected. Protected seeds
+ // may only be used by shoots in the `garden` namespace.
+ SeedTaintProtected = "seed.gardener.cloud/protected"
+)
+
+// SeedVolume contains settings for persistentvolumes created in the seed cluster.
+type SeedVolume struct {
+ // MinimumSize defines the minimum size that should be used for PVCs in the seed.
+ // +optional
+ MinimumSize *resource.Quantity `json:"minimumSize,omitempty" protobuf:"bytes,1,opt,name=minimumSize"`
+ // Providers is a list of storage class provisioner types for the seed.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ Providers []SeedVolumeProvider `json:"providers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=providers"`
+}
+
+// SeedVolumeProvider is a storage class provisioner type.
+type SeedVolumeProvider struct {
+ // Purpose is the purpose of this provider.
+ Purpose string `json:"purpose" protobuf:"bytes,1,opt,name=purpose"`
+ // Name is the name of the storage class provisioner type.
+ Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+}
+
+const (
+ // SeedBootstrapped is a constant for a condition type indicating that the seed cluster has been
+ // bootstrapped.
+ SeedBootstrapped ConditionType = "Bootstrapped"
+ // SeedExtensionsReady is a constant for a condition type indicating that the extensions are ready.
+ SeedExtensionsReady ConditionType = "ExtensionsReady"
+ // SeedGardenletReady is a constant for a condition type indicating that the Gardenlet is ready.
+ SeedGardenletReady ConditionType = "GardenletReady"
+)
+
+// Resource constants for Gardener object types
+const (
+ // ResourceShoots is a resource constant for the number of shoots.
+ ResourceShoots corev1.ResourceName = "shoots"
+)
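
Pulling the pieces of SeedSpec together, here is an illustrative and deliberately minimal Seed object; the CIDRs, ingress domain, provider type, and region are assumptions of this sketch.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ingressDomain := "ingress.seed-aws.example.org"
	nodes := "10.250.0.0/16"
	seed := gardencorev1beta1.Seed{
		ObjectMeta: metav1.ObjectMeta{Name: "seed-aws"},
		Spec: gardencorev1beta1.SeedSpec{
			// Domain under which ingress URLs for shoot system components are constructed.
			DNS: gardencorev1beta1.SeedDNS{IngressDomain: &ingressDomain},
			// Node/pod/service CIDRs of the seed cluster itself.
			Networks: gardencorev1beta1.SeedNetworks{Nodes: &nodes, Pods: "100.96.0.0/11", Services: "100.64.0.0/13"},
			// Infrastructure the seed runs on.
			Provider: gardencorev1beta1.SeedProvider{Type: "aws", Region: "eu-west-1"},
			Settings: &gardencorev1beta1.SeedSettings{
				// Let the gardener-scheduler consider this seed for new shoots.
				Scheduling: &gardencorev1beta1.SeedSettingScheduling{Visible: true},
			},
		},
	}
	fmt.Println(seed.Name)
}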
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go
new file mode 100644
index 0000000..7dc41bb
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go
@@ -0,0 +1,1179 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ "time"
+
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Shoot represents a Shoot cluster created and managed by Gardener.
+type Shoot struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the Shoot cluster.
+ // +optional
+ Spec ShootSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Most recently observed status of the Shoot cluster.
+ // +optional
+ Status ShootStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ShootList is a list of Shoot objects.
+type ShootList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of Shoots.
+ Items []Shoot `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ShootSpec is the specification of a Shoot.
+type ShootSpec struct {
+ // Addons contains information about enabled/disabled addons and their configuration.
+ // +optional
+ Addons *Addons `json:"addons,omitempty" protobuf:"bytes,1,opt,name=addons"`
+ // CloudProfileName is a name of a CloudProfile object.
+ CloudProfileName string `json:"cloudProfileName" protobuf:"bytes,2,opt,name=cloudProfileName"`
+ // DNS contains information about the DNS settings of the Shoot.
+ // +optional
+ DNS *DNS `json:"dns,omitempty" protobuf:"bytes,3,opt,name=dns"`
+ // Extensions contain type and provider information for Shoot extensions.
+ // +optional
+ Extensions []Extension `json:"extensions,omitempty" protobuf:"bytes,4,rep,name=extensions"`
+ // Hibernation contains information whether the Shoot is suspended or not.
+ // +optional
+ Hibernation *Hibernation `json:"hibernation,omitempty" protobuf:"bytes,5,opt,name=hibernation"`
+ // Kubernetes contains the version and configuration settings of the control plane components.
+ Kubernetes Kubernetes `json:"kubernetes" protobuf:"bytes,6,opt,name=kubernetes"`
+ // Networking contains information about cluster networking such as CNI plugin type, CIDRs, etc.
+ Networking Networking `json:"networking" protobuf:"bytes,7,opt,name=networking"`
+ // Maintenance contains information about the time window for maintenance operations and which
+ // operations should be performed.
+ // +optional
+ Maintenance *Maintenance `json:"maintenance,omitempty" protobuf:"bytes,8,opt,name=maintenance"`
+ // Monitoring contains information about custom monitoring configurations for the shoot.
+ // +optional
+ Monitoring *Monitoring `json:"monitoring,omitempty" protobuf:"bytes,9,opt,name=monitoring"`
+ // Provider contains all provider-specific and provider-relevant information.
+ Provider Provider `json:"provider" protobuf:"bytes,10,opt,name=provider"`
+ // Purpose is the purpose class for this cluster.
+ // +optional
+ Purpose *ShootPurpose `json:"purpose,omitempty" protobuf:"bytes,11,opt,name=purpose,casttype=ShootPurpose"`
+ // Region is a name of a region.
+ Region string `json:"region" protobuf:"bytes,12,opt,name=region"`
+ // SecretBindingName is the name of a SecretBinding that has a reference to the provider secret.
+ // The credentials inside the provider secret will be used to create the shoot in the respective account.
+ SecretBindingName string `json:"secretBindingName" protobuf:"bytes,13,opt,name=secretBindingName"`
+ // SeedName is the name of the seed cluster that runs the control plane of the Shoot.
+ // +optional
+ SeedName *string `json:"seedName,omitempty" protobuf:"bytes,14,opt,name=seedName"`
+ // SeedSelector is an optional selector which must match a seed's labels for the shoot to be scheduled on that seed.
+ // +optional
+ SeedSelector *SeedSelector `json:"seedSelector,omitempty" protobuf:"bytes,15,opt,name=seedSelector"`
+ // Resources holds a list of named resource references that can be referred to in extension configs by their names.
+ // +optional
+ Resources []NamedResourceReference `json:"resources,omitempty" protobuf:"bytes,16,rep,name=resources"`
+ // Tolerations contains the tolerations for taints on seed clusters.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ Tolerations []Toleration `json:"tolerations,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,17,rep,name=tolerations"`
+}
+
+// ShootStatus holds the most recently observed status of the Shoot cluster.
+type ShootStatus struct {
+ // Conditions represents the latest available observations of a Shoot's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+ // Constraints represents conditions of a Shoot's current state that constrain some operations on it.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Constraints []Condition `json:"constraints,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=constraints"`
+ // Gardener holds information about the Gardener which last acted on the Shoot.
+ Gardener Gardener `json:"gardener" protobuf:"bytes,3,opt,name=gardener"`
+ // IsHibernated indicates whether the Shoot is currently hibernated.
+ IsHibernated bool `json:"hibernated" protobuf:"varint,4,opt,name=hibernated"`
+ // LastOperation holds information about the last operation on the Shoot.
+ // +optional
+ LastOperation *LastOperation `json:"lastOperation,omitempty" protobuf:"bytes,5,opt,name=lastOperation"`
+ // LastErrors holds information about the last occurred error(s) during an operation.
+ // +optional
+ LastErrors []LastError `json:"lastErrors,omitempty" protobuf:"bytes,6,rep,name=lastErrors"`
+ // ObservedGeneration is the most recent generation observed for this Shoot. It corresponds to the
+ // Shoot's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,7,opt,name=observedGeneration"`
+ // RetryCycleStartTime is the start time of the last retry cycle (used to determine how often an operation
+ // must be retried until we give up).
+ // +optional
+ RetryCycleStartTime *metav1.Time `json:"retryCycleStartTime,omitempty" protobuf:"bytes,8,opt,name=retryCycleStartTime"`
+ // SeedName is the name of the seed cluster that runs the control plane of the Shoot. This value is only written
+ // after a successful create/reconcile operation. It will be used when control planes are moved between Seeds.
+ // +optional
+ SeedName *string `json:"seedName,omitempty" protobuf:"bytes,9,opt,name=seedName"`
+ // TechnicalID is the name that is used for creating the Seed namespace, the infrastructure resources, and
+ // basically everything that is related to this particular Shoot.
+ TechnicalID string `json:"technicalID" protobuf:"bytes,10,opt,name=technicalID"`
+ // UID is a unique identifier for the Shoot cluster to avoid portability between Kubernetes clusters.
+ // It is used to compute unique hashes.
+ UID types.UID `json:"uid" protobuf:"bytes,11,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+ // ClusterIdentity is the identity of the Shoot cluster
+ // +optional
+ ClusterIdentity *string `json:"clusterIdentity,omitempty" protobuf:"bytes,12,opt,name=clusterIdentity"`
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Addons relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Addons is a collection of configuration for specific addons which are managed by the Gardener.
+type Addons struct {
+ // KubernetesDashboard holds configuration settings for the kubernetes dashboard addon.
+ // +optional
+ KubernetesDashboard *KubernetesDashboard `json:"kubernetesDashboard,omitempty" protobuf:"bytes,1,opt,name=kubernetesDashboard"`
+ // NginxIngress holds configuration settings for the nginx-ingress addon.
+ // +optional
+ NginxIngress *NginxIngress `json:"nginxIngress,omitempty" protobuf:"bytes,2,opt,name=nginxIngress"`
+}
+
+// Addon allows enabling or disabling a specific addon and is embedded by the concrete addon types.
+type Addon struct {
+ // Enabled indicates whether the addon is enabled or not.
+ Enabled bool `json:"enabled" protobuf:"varint,1,opt,name=enabled"`
+}
+
+// KubernetesDashboard describes configuration values for the kubernetes-dashboard addon.
+type KubernetesDashboard struct {
+ Addon `json:",inline" protobuf:"bytes,2,opt,name=addon"`
+ // AuthenticationMode defines the authentication mode for the kubernetes-dashboard.
+ // +optional
+ AuthenticationMode *string `json:"authenticationMode,omitempty" protobuf:"bytes,1,opt,name=authenticationMode"`
+}
+
+const (
+ // KubernetesDashboardAuthModeBasic uses basic authentication mode for auth.
+ KubernetesDashboardAuthModeBasic = "basic"
+ // KubernetesDashboardAuthModeToken uses token-based mode for auth.
+ KubernetesDashboardAuthModeToken = "token"
+)
+
+// NginxIngress describes configuration values for the nginx-ingress addon.
+type NginxIngress struct {
+ Addon `json:",inline" protobuf:"bytes,1,opt,name=addon"`
+ // LoadBalancerSourceRanges is a list of allowed IP sources for NginxIngress.
+ // +optional
+ LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,2,rep,name=loadBalancerSourceRanges"`
+ // Config contains custom configuration for the nginx-ingress-controller.
+ // See https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#configuration-options
+ // +optional
+ Config map[string]string `json:"config,omitempty" protobuf:"bytes,3,rep,name=config"`
+ // ExternalTrafficPolicy controls the `.spec.externalTrafficPolicy` value of the load balancer `Service`
+ // exposing the nginx-ingress. Defaults to `Cluster`.
+ // +optional
+ ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,4,opt,name=externalTrafficPolicy,casttype=k8s.io/api/core/v1.ServiceExternalTrafficPolicyType"`
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// DNS relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// DNS holds information about the provider, the hosted zone id and the domain.
+type DNS struct {
+ // Domain is the external available domain of the Shoot cluster. This domain will be written into the
+ // kubeconfig that is handed out to end-users. Once set it is immutable.
+ // +optional
+ Domain *string `json:"domain,omitempty" protobuf:"bytes,1,opt,name=domain"`
+ // Providers is a list of DNS providers that shall be enabled for this shoot cluster. Only relevant if
+ // a default domain is not used.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Providers []DNSProvider `json:"providers,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=providers"`
+}
+
+// DNSProvider contains information about a DNS provider.
+type DNSProvider struct {
+ // Domains contains information about which domains shall be included/excluded for this provider.
+ // +optional
+ Domains *DNSIncludeExclude `json:"domains,omitempty" protobuf:"bytes,1,opt,name=domains"`
+ // Primary indicates that this DNSProvider is used for shoot related domains.
+ // +optional
+ Primary *bool `json:"primary,omitempty" protobuf:"varint,2,opt,name=primary"`
+ // SecretName is a name of a secret containing credentials for the stated domain and the
+ // provider. When not specified, the Gardener will use the cloud provider credentials referenced
+ // by the Shoot and try to find respective credentials there (primary provider only). Specifying this field may override
+ // this behavior, i.e. force the Gardener to only look into the given secret.
+ // +optional
+ SecretName *string `json:"secretName,omitempty" protobuf:"bytes,3,opt,name=secretName"`
+ // Type is the DNS provider type.
+ // +optional
+ Type *string `json:"type,omitempty" protobuf:"bytes,4,opt,name=type"`
+ // Zones contains information about which hosted zones shall be included/excluded for this provider.
+ // +optional
+ Zones *DNSIncludeExclude `json:"zones,omitempty" protobuf:"bytes,5,opt,name=zones"`
+}
+
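+// DNSIncludeExclude contains information about which domains shall be included/excluded.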
+type DNSIncludeExclude struct {
+ // Include is a list of resources that shall be included.
+ // +optional
+ Include []string `json:"include,omitempty" protobuf:"bytes,1,rep,name=include"`
+ // Exclude is a list of resources that shall be excluded.
+ // +optional
+ Exclude []string `json:"exclude,omitempty" protobuf:"bytes,2,rep,name=exclude"`
+}
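+
+// A minimal illustrative sketch of how the DNS section above fits together. All concrete values
+// (provider type, secret name, domains) are assumptions chosen for demonstration only and are not
+// part of, or recommended by, the Gardener API.
+var (
+ exampleDNSDomain       = "shoot.example.com"
+ exampleDNSProviderType = "aws-route53"
+ exampleDNSSecretName   = "my-route53-credentials"
+ exampleDNSPrimary      = true
+
+ // exampleDNS enables one primary provider that manages "example.com" while excluding a
+ // delegated subdomain from its responsibility.
+ exampleDNS = DNS{
+  Domain: &exampleDNSDomain,
+  Providers: []DNSProvider{
+   {
+    Type:       &exampleDNSProviderType,
+    SecretName: &exampleDNSSecretName,
+    Primary:    &exampleDNSPrimary,
+    Domains: &DNSIncludeExclude{
+     Include: []string{"example.com"},
+     Exclude: []string{"internal.example.com"},
+    },
+   },
+  },
+ }
+)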
+
+// DefaultDomain is the default value in the Shoot's '.spec.dns.domain' when '.spec.dns.provider' is 'unmanaged'
+const DefaultDomain = "cluster.local"
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Extension relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Extension contains type and provider information for Shoot extensions.
+type Extension struct {
+ // Type is the type of the extension resource.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // ProviderConfig is the configuration passed to extension resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // Disabled allows to disable extensions that were marked as 'globally enabled' by Gardener administrators.
+ // +optional
+ Disabled *bool `json:"disabled,omitempty" protobuf:"varint,3,opt,name=disabled"`
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// NamedResourceReference relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// NamedResourceReference is a named reference to a resource.
+type NamedResourceReference struct {
+ // Name of the resource reference.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // ResourceRef is a reference to a resource.
+ ResourceRef autoscalingv1.CrossVersionObjectReference `json:"resourceRef" protobuf:"bytes,2,opt,name=resourceRef"`
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Hibernation relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Hibernation contains information whether the Shoot is suspended or not.
+type Hibernation struct {
+ // Enabled specifies whether the Shoot needs to be hibernated or not. If it is true, the Shoot's desired state is to be hibernated.
+ // If it is false or nil, the Shoot's desired state is to be awake.
+ // +optional
+ Enabled *bool `json:"enabled,omitempty" protobuf:"varint,1,opt,name=enabled"`
+ // Schedules determine the hibernation schedules.
+ // +optional
+ Schedules []HibernationSchedule `json:"schedules,omitempty" protobuf:"bytes,2,rep,name=schedules"`
+}
+
+// HibernationSchedule determines the hibernation schedule of a Shoot.
+// A Shoot will be regularly hibernated at each start time and will be woken up at each end time.
+// Start or End can be omitted, though at least one of them has to be specified.
+type HibernationSchedule struct {
+ // Start is a Cron spec at which time a Shoot will be hibernated.
+ // +optional
+ Start *string `json:"start,omitempty" protobuf:"bytes,1,opt,name=start"`
+ // End is a Cron spec at which time a Shoot will be woken up.
+ // +optional
+ End *string `json:"end,omitempty" protobuf:"bytes,2,opt,name=end"`
+ // Location is the time location in which both start and end shall be evaluated.
+ // +optional
+ Location *string `json:"location,omitempty" protobuf:"bytes,3,opt,name=location"`
+}
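+
+// An illustrative sketch of a HibernationSchedule (all values are assumptions, not defaults):
+// the Shoot is hibernated at 20:00 and woken up at 06:00 on weekdays, with both cron specs
+// evaluated in the Europe/Berlin time zone.
+var (
+ exampleHibernationStart    = "00 20 * * 1,2,3,4,5"
+ exampleHibernationEnd      = "00 06 * * 1,2,3,4,5"
+ exampleHibernationLocation = "Europe/Berlin"
+
+ exampleHibernationSchedule = HibernationSchedule{
+  Start:    &exampleHibernationStart,
+  End:      &exampleHibernationEnd,
+  Location: &exampleHibernationLocation,
+ }
+)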
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Kubernetes relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Kubernetes contains the version and configuration variables for the Shoot control plane.
+type Kubernetes struct {
+ // AllowPrivilegedContainers indicates whether privileged containers are allowed in the Shoot (default: true).
+ // +optional
+ AllowPrivilegedContainers *bool `json:"allowPrivilegedContainers,omitempty" protobuf:"varint,1,opt,name=allowPrivilegedContainers"`
+ // ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler.
+ // +optional
+ ClusterAutoscaler *ClusterAutoscaler `json:"clusterAutoscaler,omitempty" protobuf:"bytes,2,opt,name=clusterAutoscaler"`
+ // KubeAPIServer contains configuration settings for the kube-apiserver.
+ // +optional
+ KubeAPIServer *KubeAPIServerConfig `json:"kubeAPIServer,omitempty" protobuf:"bytes,3,opt,name=kubeAPIServer"`
+ // KubeControllerManager contains configuration settings for the kube-controller-manager.
+ // +optional
+ KubeControllerManager *KubeControllerManagerConfig `json:"kubeControllerManager,omitempty" protobuf:"bytes,4,opt,name=kubeControllerManager"`
+ // KubeScheduler contains configuration settings for the kube-scheduler.
+ // +optional
+ KubeScheduler *KubeSchedulerConfig `json:"kubeScheduler,omitempty" protobuf:"bytes,5,opt,name=kubeScheduler"`
+ // KubeProxy contains configuration settings for the kube-proxy.
+ // +optional
+ KubeProxy *KubeProxyConfig `json:"kubeProxy,omitempty" protobuf:"bytes,6,opt,name=kubeProxy"`
+ // Kubelet contains configuration settings for the kubelet.
+ // +optional
+ Kubelet *KubeletConfig `json:"kubelet,omitempty" protobuf:"bytes,7,opt,name=kubelet"`
+ // Version is the semantic Kubernetes version to use for the Shoot cluster.
+ Version string `json:"version" protobuf:"bytes,8,opt,name=version"`
+ // VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler.
+ // +optional
+ VerticalPodAutoscaler *VerticalPodAutoscaler `json:"verticalPodAutoscaler,omitempty" protobuf:"bytes,9,opt,name=verticalPodAutoscaler"`
+}
+
+// ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler.
+type ClusterAutoscaler struct {
+ // ScaleDownDelayAfterAdd defines how long after scale up that scale down evaluation resumes (default: 1 hour).
+ // +optional
+ ScaleDownDelayAfterAdd *metav1.Duration `json:"scaleDownDelayAfterAdd,omitempty" protobuf:"bytes,1,opt,name=scaleDownDelayAfterAdd"`
+ // ScaleDownDelayAfterDelete defines how long after node deletion scale down evaluation resumes (defaults to ScanInterval).
+ // +optional
+ ScaleDownDelayAfterDelete *metav1.Duration `json:"scaleDownDelayAfterDelete,omitempty" protobuf:"bytes,2,opt,name=scaleDownDelayAfterDelete"`
+ // ScaleDownDelayAfterFailure defines how long after a scale down failure scale down evaluation resumes (default: 3 mins).
+ // +optional
+ ScaleDownDelayAfterFailure *metav1.Duration `json:"scaleDownDelayAfterFailure,omitempty" protobuf:"bytes,3,opt,name=scaleDownDelayAfterFailure"`
+ // ScaleDownUnneededTime defines how long a node should be unneeded before it is eligible for scale down (default: 30 mins).
+ // +optional
+ ScaleDownUnneededTime *metav1.Duration `json:"scaleDownUnneededTime,omitempty" protobuf:"bytes,4,opt,name=scaleDownUnneededTime"`
+ // ScaleDownUtilizationThreshold defines the threshold in % under which a node is removed.
+ // +optional
+ ScaleDownUtilizationThreshold *float64 `json:"scaleDownUtilizationThreshold,omitempty" protobuf:"fixed64,5,opt,name=scaleDownUtilizationThreshold"`
+ // ScanInterval defines how often the cluster is reevaluated for scale up or down (default: 10 secs).
+ // +optional
+ ScanInterval *metav1.Duration `json:"scanInterval,omitempty" protobuf:"bytes,6,opt,name=scanInterval"`
+}
+
+// VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler.
+type VerticalPodAutoscaler struct {
+ // Enabled specifies whether the Kubernetes VPA shall be enabled for the shoot cluster.
+ Enabled bool `json:"enabled" protobuf:"varint,1,opt,name=enabled"`
+ // EvictAfterOOMThreshold defines the threshold that leads to the eviction of a pod if it OOMed in less than the given
+ // threshold since its start and if it has only one container (default: 10m0s).
+ // +optional
+ EvictAfterOOMThreshold *metav1.Duration `json:"evictAfterOOMThreshold,omitempty" protobuf:"bytes,2,opt,name=evictAfterOOMThreshold"`
+ // EvictionRateBurst defines the burst of pods that can be evicted (default: 1)
+ // +optional
+ EvictionRateBurst *int32 `json:"evictionRateBurst,omitempty" protobuf:"varint,3,opt,name=evictionRateBurst"`
+ // EvictionRateLimit defines the number of pods that can be evicted per second. A rate limit set to 0 or -1 will
+ // disable the rate limiter (default: -1).
+ // +optional
+ EvictionRateLimit *float64 `json:"evictionRateLimit,omitempty" protobuf:"fixed64,4,opt,name=evictionRateLimit"`
+ // EvictionTolerance defines the fraction of replica count that can be evicted for update in case more than one
+ // pod can be evicted (default: 0.5).
+ // +optional
+ EvictionTolerance *float64 `json:"evictionTolerance,omitempty" protobuf:"fixed64,5,opt,name=evictionTolerance"`
+ // RecommendationMarginFraction is the fraction of usage added as the safety margin to the recommended request
+ // (default: 0.15).
+ // +optional
+ RecommendationMarginFraction *float64 `json:"recommendationMarginFraction,omitempty" protobuf:"fixed64,6,opt,name=recommendationMarginFraction"`
+ // UpdaterInterval is the interval at which the updater should run (default: 1m0s).
+ // +optional
+ UpdaterInterval *metav1.Duration `json:"updaterInterval,omitempty" protobuf:"bytes,7,opt,name=updaterInterval"`
+ // RecommenderInterval is the interval at which metrics should be fetched (default: 1m0s).
+ // +optional
+ RecommenderInterval *metav1.Duration `json:"recommenderInterval,omitempty" protobuf:"bytes,8,opt,name=recommenderInterval"`
+}
+
+const (
+ // DefaultEvictionRateBurst is the default value for the EvictionRateBurst field in the VPA configuration.
+ DefaultEvictionRateBurst int32 = 1
+ // DefaultEvictionRateLimit is the default value for the EvictionRateLimit field in the VPA configuration.
+ DefaultEvictionRateLimit float64 = -1
+ // DefaultEvictionTolerance is the default value for the EvictionTolerance field in the VPA configuration.
+ DefaultEvictionTolerance = 0.5
+ // DefaultRecommendationMarginFraction is the default value for the RecommendationMarginFraction field in the VPA configuration.
+ DefaultRecommendationMarginFraction = 0.15
+)
+
+var (
+ // DefaultEvictAfterOOMThreshold is the default value for the EvictAfterOOMThreshold field in the VPA configuration.
+ DefaultEvictAfterOOMThreshold = metav1.Duration{Duration: 10 * time.Minute}
+ // DefaultUpdaterInterval is the default value for the UpdaterInterval field in the VPA configuration.
+ DefaultUpdaterInterval = metav1.Duration{Duration: time.Minute}
+ // DefaultRecommenderInterval is the default value for the RecommenderInterval field in the VPA configuration.
+ DefaultRecommenderInterval = metav1.Duration{Duration: time.Minute}
+)
+
+// KubernetesConfig contains common configuration fields for the control plane components.
+type KubernetesConfig struct {
+ // FeatureGates contains information about enabled feature gates.
+ // +optional
+ FeatureGates map[string]bool `json:"featureGates,omitempty" protobuf:"bytes,1,rep,name=featureGates"`
+}
+
+// KubeAPIServerConfig contains configuration settings for the kube-apiserver.
+type KubeAPIServerConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // AdmissionPlugins contains the list of user-defined admission plugins (additional to those managed by Gardener), and, if desired, the corresponding
+ // configuration.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ AdmissionPlugins []AdmissionPlugin `json:"admissionPlugins,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=admissionPlugins"`
+ // APIAudiences are the identifiers of the API. The service account token authenticator will
+ // validate that tokens used against the API are bound to at least one of these audiences.
+ // Defaults to ["kubernetes"].
+ // +optional
+ APIAudiences []string `json:"apiAudiences,omitempty" protobuf:"bytes,3,rep,name=apiAudiences"`
+ // AuditConfig contains configuration settings for the audit of the kube-apiserver.
+ // +optional
+ AuditConfig *AuditConfig `json:"auditConfig,omitempty" protobuf:"bytes,4,opt,name=auditConfig"`
+ // EnableBasicAuthentication defines whether basic authentication should be enabled for this cluster or not.
+ // +optional
+ EnableBasicAuthentication *bool `json:"enableBasicAuthentication,omitempty" protobuf:"varint,5,opt,name=enableBasicAuthentication"`
+ // OIDCConfig contains configuration settings for the OIDC provider.
+ // +optional
+ OIDCConfig *OIDCConfig `json:"oidcConfig,omitempty" protobuf:"bytes,6,opt,name=oidcConfig"`
+ // RuntimeConfig contains information about enabled or disabled APIs.
+ // +optional
+ RuntimeConfig map[string]bool `json:"runtimeConfig,omitempty" protobuf:"bytes,7,rep,name=runtimeConfig"`
+ // ServiceAccountConfig contains configuration settings for the service account handling
+ // of the kube-apiserver.
+ // +optional
+ ServiceAccountConfig *ServiceAccountConfig `json:"serviceAccountConfig,omitempty" protobuf:"bytes,8,opt,name=serviceAccountConfig"`
+ // WatchCacheSizes contains configuration of the API server's watch cache sizes.
+ // Configuring these flags might be useful for large-scale Shoot clusters with a lot of parallel update requests
+ // and a lot of watching controllers (e.g. large shooted Seed clusters). When the API server's watch cache's
+ // capacity is too small to cope with the amount of update requests and watchers for a particular resource, it
+ // might happen that controller watches are permanently stopped with `too old resource version` errors.
+ // Starting from kubernetes v1.19, the API server's watch cache size is adapted dynamically and setting the watch
+ // cache size flags will have no effect, except when setting it to 0 (which disables the watch cache).
+ // +optional
+ WatchCacheSizes *WatchCacheSizes `json:"watchCacheSizes,omitempty" protobuf:"bytes,9,opt,name=watchCacheSizes"`
+ // Requests contains configuration for request-specific settings for the kube-apiserver.
+ // +optional
+ Requests *KubeAPIServerRequests `json:"requests,omitempty" protobuf:"bytes,10,opt,name=requests"`
+}
+
+// KubeAPIServerRequests contains configuration for request-specific settings for the kube-apiserver.
+type KubeAPIServerRequests struct {
+ // MaxNonMutatingInflight is the maximum number of non-mutating requests in flight at a given time. When the server
+ // exceeds this, it rejects requests.
+ // +optional
+ MaxNonMutatingInflight *int32 `json:"maxNonMutatingInflight,omitempty" protobuf:"bytes,1,name=maxNonMutatingInflight"`
+ // MaxMutatingInflight is the maximum number of mutating requests in flight at a given time. When the server
+ // exceeds this, it rejects requests.
+ // +optional
+ MaxMutatingInflight *int32 `json:"maxMutatingInflight,omitempty" protobuf:"bytes,2,name=maxMutatingInflight"`
+}
+
+// ServiceAccountConfig is the kube-apiserver configuration for service accounts.
+type ServiceAccountConfig struct {
+ // Issuer is the identifier of the service account token issuer. The issuer will assert this
+ // identifier in "iss" claim of issued tokens. This value is a string or URI.
+ // Defaults to URI of the API server.
+ // +optional
+ Issuer *string `json:"issuer,omitempty" protobuf:"bytes,1,opt,name=issuer"`
+ // SigningKeySecret is a reference to a secret that contains an optional private key of the
+ // service account token issuer. The issuer will sign issued ID tokens with this private key.
+ // Only useful if service account tokens are also issued by another external system.
+ // +optional
+ SigningKeySecret *corev1.LocalObjectReference `json:"signingKeySecretName,omitempty" protobuf:"bytes,2,opt,name=signingKeySecretName"`
+}
+
+// AuditConfig contains settings for the audit of the API server.
+type AuditConfig struct {
+ // AuditPolicy contains configuration settings for audit policy of the kube-apiserver.
+ // +optional
+ AuditPolicy *AuditPolicy `json:"auditPolicy,omitempty" protobuf:"bytes,1,opt,name=auditPolicy"`
+}
+
+// AuditPolicy contains the audit policy for the kube-apiserver.
+type AuditPolicy struct {
+ // ConfigMapRef is a reference to a ConfigMap object in the same namespace,
+ // which contains the audit policy for the kube-apiserver.
+ // +optional
+ ConfigMapRef *corev1.ObjectReference `json:"configMapRef,omitempty" protobuf:"bytes,1,opt,name=configMapRef"`
+}
+
+// OIDCConfig contains configuration settings for the OIDC provider.
+// Note: Descriptions were taken from the Kubernetes documentation.
+type OIDCConfig struct {
+ // If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.
+ // +optional
+ CABundle *string `json:"caBundle,omitempty" protobuf:"bytes,1,opt,name=caBundle"`
+ // ClientAuthentication can optionally contain client configuration used for kubeconfig generation.
+ // +optional
+ ClientAuthentication *OpenIDConnectClientAuthentication `json:"clientAuthentication,omitempty" protobuf:"bytes,2,opt,name=clientAuthentication"`
+ // The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.
+ // +optional
+ ClientID *string `json:"clientID,omitempty" protobuf:"bytes,3,opt,name=clientID"`
+ // If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details.
+ // +optional
+ GroupsClaim *string `json:"groupsClaim,omitempty" protobuf:"bytes,4,opt,name=groupsClaim"`
+ // If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.
+ // +optional
+ GroupsPrefix *string `json:"groupsPrefix,omitempty" protobuf:"bytes,5,opt,name=groupsPrefix"`
+ // The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).
+ // +optional
+ IssuerURL *string `json:"issuerURL,omitempty" protobuf:"bytes,6,opt,name=issuerURL"`
+ // ATTENTION: Only meaningful for Kubernetes >= 1.11
+ // key=value pairs that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value.
+ // +optional
+ RequiredClaims map[string]string `json:"requiredClaims,omitempty" protobuf:"bytes,7,rep,name=requiredClaims"`
+ // List of allowed JOSE asymmetric signing algorithms. JWTs with an 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1
+ // +optional
+ SigningAlgs []string `json:"signingAlgs,omitempty" protobuf:"bytes,8,rep,name=signingAlgs"`
+ // The OpenID claim to use as the user name. Note that claims other than the default ('sub') are not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub")
+ // +optional
+ UsernameClaim *string `json:"usernameClaim,omitempty" protobuf:"bytes,9,opt,name=usernameClaim"`
+ // If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'.
+ // +optional
+ UsernamePrefix *string `json:"usernamePrefix,omitempty" protobuf:"bytes,10,opt,name=usernamePrefix"`
+}
+
+// OpenIDConnectClientAuthentication contains configuration for OIDC clients.
+type OpenIDConnectClientAuthentication struct {
+ // Extra configuration added to kubeconfig's auth-provider.
+ // Must not be any of idp-issuer-url, client-id, client-secret, idp-certificate-authority, idp-certificate-authority-data, id-token or refresh-token
+ // +optional
+ ExtraConfig map[string]string `json:"extraConfig,omitempty" protobuf:"bytes,1,rep,name=extraConfig"`
+ // The client Secret for the OpenID Connect client.
+ // +optional
+ Secret *string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+}
+
+// AdmissionPlugin contains information about a specific admission plugin and its corresponding configuration.
+type AdmissionPlugin struct {
+ // Name is the name of the plugin.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Config is the configuration of the plugin.
+ // +optional
+ Config *runtime.RawExtension `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
+}
+
+// WatchCacheSizes contains configuration of the API server's watch cache sizes.
+type WatchCacheSizes struct {
+ // Default configures the default watch cache size of the kube-apiserver
+ // (flag `--default-watch-cache-size`, defaults to 100).
+ // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+ // +optional
+ Default *int32 `json:"default,omitempty" protobuf:"varint,1,opt,name=default"`
+ // Resources configures the watch cache size of the kube-apiserver per resource
+ // (flag `--watch-cache-sizes`).
+ // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+ // +optional
+ Resources []ResourceWatchCacheSize `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
+}
+
+// ResourceWatchCacheSize contains configuration of the API server's watch cache size for one specific resource.
+type ResourceWatchCacheSize struct {
+ // APIGroup is the API group of the resource for which the watch cache size should be configured.
+ // An unset value is used to specify the legacy core API (e.g. for `secrets`).
+ // +optional
+ APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"`
+ // Resource is the name of the resource for which the watch cache size should be configured
+ // (in lowercase plural form, e.g. `secrets`).
+ Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
+ // CacheSize specifies the watch cache size that should be configured for the specified resource.
+ CacheSize int32 `json:"size" protobuf:"varint,3,opt,name=size"`
+}
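+
+// An illustrative sketch of a WatchCacheSizes configuration (values are assumptions, not
+// recommendations): the default watch cache is kept at 100 entries while the cache for
+// `secrets` of the legacy core API group (APIGroup left nil) is raised to 500 entries.
+var (
+ exampleDefaultWatchCacheSize int32 = 100
+
+ exampleWatchCacheSizes = WatchCacheSizes{
+  Default: &exampleDefaultWatchCacheSize,
+  Resources: []ResourceWatchCacheSize{
+   {
+    Resource:  "secrets",
+    CacheSize: 500,
+   },
+  },
+ }
+)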
+
+// KubeControllerManagerConfig contains configuration settings for the kube-controller-manager.
+type KubeControllerManagerConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.
+ // +optional
+ HorizontalPodAutoscalerConfig *HorizontalPodAutoscalerConfig `json:"horizontalPodAutoscaler,omitempty" protobuf:"bytes,2,opt,name=horizontalPodAutoscaler"`
+ // NodeCIDRMaskSize defines the mask size for the node CIDR in the cluster (default: 24).
+ // +optional
+ NodeCIDRMaskSize *int32 `json:"nodeCIDRMaskSize,omitempty" protobuf:"varint,3,opt,name=nodeCIDRMaskSize"`
+ // PodEvictionTimeout defines the grace period for deleting pods on failed nodes. Defaults to 2m.
+ // +optional
+ PodEvictionTimeout *metav1.Duration `json:"podEvictionTimeout,omitempty" protobuf:"bytes,4,opt,name=podEvictionTimeout"`
+}
+
+// HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.
+// Note: Descriptions were taken from the Kubernetes documentation.
+type HorizontalPodAutoscalerConfig struct {
+ // The period after which a ready pod transition is considered to be the first.
+ // +optional
+ CPUInitializationPeriod *metav1.Duration `json:"cpuInitializationPeriod,omitempty" protobuf:"bytes,1,opt,name=cpuInitializationPeriod"`
+ // The period since last downscale, before another downscale can be performed in horizontal pod autoscaler.
+ // +optional
+ DownscaleDelay *metav1.Duration `json:"downscaleDelay,omitempty" protobuf:"bytes,2,opt,name=downscaleDelay"`
+ // The configurable window at which the controller will choose the highest recommendation for autoscaling.
+ // +optional
+ DownscaleStabilization *metav1.Duration `json:"downscaleStabilization,omitempty" protobuf:"bytes,3,opt,name=downscaleStabilization"`
+ // The configurable period at which the horizontal pod autoscaler considers a Pod “not yet ready” given that it’s unready and it has transitioned to unready during that time.
+ // +optional
+ InitialReadinessDelay *metav1.Duration `json:"initialReadinessDelay,omitempty" protobuf:"bytes,4,opt,name=initialReadinessDelay"`
+ // The period for syncing the number of pods in horizontal pod autoscaler.
+ // +optional
+ SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty" protobuf:"bytes,5,opt,name=syncPeriod"`
+ // The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.
+ // +optional
+ Tolerance *float64 `json:"tolerance,omitempty" protobuf:"fixed64,6,opt,name=tolerance"`
+ // The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.
+ // +optional
+ UpscaleDelay *metav1.Duration `json:"upscaleDelay,omitempty" protobuf:"bytes,7,opt,name=upscaleDelay"`
+}
+
+const (
+ // DefaultHPADownscaleDelay is a constant for the default HPA downscale delay for a Shoot cluster.
+ DefaultHPADownscaleDelay = 15 * time.Minute
+ // DefaultHPASyncPeriod is a constant for the default HPA sync period for a Shoot cluster.
+ DefaultHPASyncPeriod = 30 * time.Second
+ // DefaultHPATolerance is a constant for the default HPA tolerance for a Shoot cluster.
+ DefaultHPATolerance = 0.1
+ // DefaultHPAUpscaleDelay is a constant for the default HPA upscale delay for a Shoot cluster.
+ DefaultHPAUpscaleDelay = 1 * time.Minute
+ // DefaultDownscaleStabilization is the default HPA downscale stabilization window for a Shoot cluster.
+ DefaultDownscaleStabilization = 5 * time.Minute
+ // DefaultInitialReadinessDelay is a constant for the default HPA initial readiness delay for a Shoot cluster.
+ DefaultInitialReadinessDelay = 30 * time.Second
+ // DefaultCPUInitializationPeriod is a constant for the default value of the HPA CPUInitializationPeriod for a Shoot cluster.
+ DefaultCPUInitializationPeriod = 5 * time.Minute
+)
+
+// KubeSchedulerConfig contains configuration settings for the kube-scheduler.
+type KubeSchedulerConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // KubeMaxPDVols allows to configure the `KUBE_MAX_PD_VOLS` environment variable for the kube-scheduler.
+ // Please find more information here: https://kubernetes.io/docs/concepts/storage/storage-limits/#custom-limits
+ // Note that using this field is considered alpha-/experimental-level and is at your own risk. You should be aware
+ // of all the side-effects and consequences when changing it.
+ // +optional
+ KubeMaxPDVols *string `json:"kubeMaxPDVols,omitempty" protobuf:"bytes,2,opt,name=kubeMaxPDVols"`
+}
+
+// KubeProxyConfig contains configuration settings for the kube-proxy.
+type KubeProxyConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // Mode specifies which proxy mode to use.
+ // Defaults to IPTables.
+ // +optional
+ Mode *ProxyMode `json:"mode,omitempty" protobuf:"bytes,2,opt,name=mode,casttype=ProxyMode"`
+}
+
+// ProxyMode is the proxy mode available on the Linux platform: 'userspace' (older, going to be EOL), 'iptables'
+// (newer, faster), 'ipvs' (newest, better in performance and scalability).
+// As of now only 'iptables' and 'ipvs' are supported by Gardener.
+// On Linux, if the iptables proxy is selected (regardless of how) but the system's kernel or iptables versions are
+// insufficient, this always falls back to the userspace proxy. IPVS mode will be enabled when the proxy mode is set to 'ipvs',
+// and the fallback path is firstly iptables and then userspace.
+type ProxyMode string
+
+const (
+ // ProxyModeIPTables uses iptables as proxy implementation.
+ ProxyModeIPTables ProxyMode = "IPTables"
+ // ProxyModeIPVS uses ipvs as proxy implementation.
+ ProxyModeIPVS ProxyMode = "IPVS"
+)
+
+// KubeletConfig contains configuration settings for the kubelet.
+type KubeletConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // CPUCFSQuota allows you to disable/enable CPU throttling for Pods.
+ // +optional
+ CPUCFSQuota *bool `json:"cpuCFSQuota,omitempty" protobuf:"varint,2,opt,name=cpuCFSQuota"`
+ // CPUManagerPolicy allows to set alternative CPU management policies (default: none).
+ // +optional
+ CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" protobuf:"bytes,3,opt,name=cpuManagerPolicy"`
+ // EvictionHard describes a set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a Pod eviction.
+ // +optional
+ // Default:
+ // memory.available: "100Mi/1Gi/5%"
+ // nodefs.available: "5%"
+ // nodefs.inodesFree: "5%"
+ // imagefs.available: "5%"
+ // imagefs.inodesFree: "5%"
+ EvictionHard *KubeletConfigEviction `json:"evictionHard,omitempty" protobuf:"bytes,4,opt,name=evictionHard"`
+ // EvictionMaxPodGracePeriod describes the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ // +optional
+ // Default: 90
+ EvictionMaxPodGracePeriod *int32 `json:"evictionMaxPodGracePeriod,omitempty" protobuf:"varint,5,opt,name=evictionMaxPodGracePeriod"`
+ // EvictionMinimumReclaim configures the amount of resources below the configured eviction threshold that the kubelet attempts to reclaim whenever the kubelet observes resource pressure.
+ // +optional
+ // Default: 0 for each resource
+ EvictionMinimumReclaim *KubeletConfigEvictionMinimumReclaim `json:"evictionMinimumReclaim,omitempty" protobuf:"bytes,6,opt,name=evictionMinimumReclaim"`
+ // EvictionPressureTransitionPeriod is the duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.
+ // +optional
+ // Default: 4m0s
+ EvictionPressureTransitionPeriod *metav1.Duration `json:"evictionPressureTransitionPeriod,omitempty" protobuf:"bytes,7,opt,name=evictionPressureTransitionPeriod"`
+ // EvictionSoft describes a set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a Pod eviction.
+ // +optional
+ // Default:
+ // memory.available: "200Mi/1.5Gi/10%"
+ // nodefs.available: "10%"
+ // nodefs.inodesFree: "10%"
+ // imagefs.available: "10%"
+ // imagefs.inodesFree: "10%"
+ EvictionSoft *KubeletConfigEviction `json:"evictionSoft,omitempty" protobuf:"bytes,8,opt,name=evictionSoft"`
+ // EvictionSoftGracePeriod describes a set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a Pod eviction.
+ // +optional
+ // Default:
+ // memory.available: 1m30s
+ // nodefs.available: 1m30s
+ // nodefs.inodesFree: 1m30s
+ // imagefs.available: 1m30s
+ // imagefs.inodesFree: 1m30s
+ EvictionSoftGracePeriod *KubeletConfigEvictionSoftGracePeriod `json:"evictionSoftGracePeriod,omitempty" protobuf:"bytes,9,opt,name=evictionSoftGracePeriod"`
+ // MaxPods is the maximum number of Pods that are allowed by the Kubelet.
+ // +optional
+ // Default: 110
+ MaxPods *int32 `json:"maxPods,omitempty" protobuf:"varint,10,opt,name=maxPods"`
+ // PodPIDsLimit is the maximum number of process IDs per pod allowed by the kubelet.
+ // +optional
+ PodPIDsLimit *int64 `json:"podPidsLimit,omitempty" protobuf:"varint,11,opt,name=podPidsLimit"`
+ // ImagePullProgressDeadline describes the time limit within which, if no pulling progress is made, the image pulling will be cancelled.
+ // +optional
+ // Default: 1m
+ ImagePullProgressDeadline *metav1.Duration `json:"imagePullProgressDeadline,omitempty" protobuf:"bytes,12,opt,name=imagePullProgressDeadline"`
+ // FailSwapOn makes the Kubelet fail to start if swap is enabled on the node. (default true).
+ // +optional
+ FailSwapOn *bool `json:"failSwapOn,omitempty" protobuf:"varint,13,opt,name=failSwapOn"`
+ // KubeReserved is the configuration for resources reserved for kubernetes node components (mainly kubelet and container runtime).
+ // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied.
+ // +optional
+ // Default: cpu=80m,memory=1Gi,pid=20k
+ KubeReserved *KubeletConfigReserved `json:"kubeReserved,omitempty" protobuf:"bytes,14,opt,name=kubeReserved"`
+ // SystemReserved is the configuration for resources reserved for system processes not managed by kubernetes (e.g. journald).
+ // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied.
+ // +optional
+ SystemReserved *KubeletConfigReserved `json:"systemReserved,omitempty" protobuf:"bytes,15,opt,name=systemReserved"`
+}
+
+// KubeletConfigEviction contains kubelet eviction thresholds supporting either a resource.Quantity or a percentage based value.
+type KubeletConfigEviction struct {
+ // MemoryAvailable is the threshold for the free memory on the host server.
+ // +optional
+ MemoryAvailable *string `json:"memoryAvailable,omitempty" protobuf:"bytes,1,opt,name=memoryAvailable"`
+ // ImageFSAvailable is the threshold for the free disk space in the imagefs filesystem (docker images and container writable layers).
+ // +optional
+ ImageFSAvailable *string `json:"imageFSAvailable,omitempty" protobuf:"bytes,2,opt,name=imageFSAvailable"`
+ // ImageFSInodesFree is the threshold for the available inodes in the imagefs filesystem.
+ // +optional
+ ImageFSInodesFree *string `json:"imageFSInodesFree,omitempty" protobuf:"bytes,3,opt,name=imageFSInodesFree"`
+ // NodeFSAvailable is the threshold for the free disk space in the nodefs filesystem (docker volumes, logs, etc).
+ // +optional
+ NodeFSAvailable *string `json:"nodeFSAvailable,omitempty" protobuf:"bytes,4,opt,name=nodeFSAvailable"`
+ // NodeFSInodesFree is the threshold for the available inodes in the nodefs filesystem.
+ // +optional
+ NodeFSInodesFree *string `json:"nodeFSInodesFree,omitempty" protobuf:"bytes,5,opt,name=nodeFSInodesFree"`
+}
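+
+// An illustrative sketch of hard eviction thresholds (values are assumptions, not the defaults
+// documented above): each threshold is given as a string and may be either an absolute
+// quantity (e.g. "100Mi") or a percentage (e.g. "5%").
+var (
+ exampleEvictionMemoryAvailable = "100Mi"
+ exampleEvictionNodeFSAvailable = "5%"
+
+ exampleEvictionHard = KubeletConfigEviction{
+  MemoryAvailable: &exampleEvictionMemoryAvailable,
+  NodeFSAvailable: &exampleEvictionNodeFSAvailable,
+ }
+)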
+
+// KubeletConfigEvictionMinimumReclaim contains configuration for the kubelet eviction minimum reclaim.
+type KubeletConfigEvictionMinimumReclaim struct {
+ // MemoryAvailable is the threshold for the memory reclaim on the host server.
+ // +optional
+ MemoryAvailable *resource.Quantity `json:"memoryAvailable,omitempty" protobuf:"bytes,1,opt,name=memoryAvailable"`
+ // ImageFSAvailable is the threshold for the disk space reclaim in the imagefs filesystem (docker images and container writable layers).
+ // +optional
+ ImageFSAvailable *resource.Quantity `json:"imageFSAvailable,omitempty" protobuf:"bytes,2,opt,name=imageFSAvailable"`
+ // ImageFSInodesFree is the threshold for the inodes reclaim in the imagefs filesystem.
+ // +optional
+ ImageFSInodesFree *resource.Quantity `json:"imageFSInodesFree,omitempty" protobuf:"bytes,3,opt,name=imageFSInodesFree"`
+ // NodeFSAvailable is the threshold for the disk space reclaim in the nodefs filesystem (docker volumes, logs, etc).
+ // +optional
+ NodeFSAvailable *resource.Quantity `json:"nodeFSAvailable,omitempty" protobuf:"bytes,4,opt,name=nodeFSAvailable"`
+ // NodeFSInodesFree is the threshold for the inodes reclaim in the nodefs filesystem.
+ // +optional
+ NodeFSInodesFree *resource.Quantity `json:"nodeFSInodesFree,omitempty" protobuf:"bytes,5,opt,name=nodeFSInodesFree"`
+}
+
+// KubeletConfigEvictionSoftGracePeriod contains grace periods for kubelet eviction thresholds.
+type KubeletConfigEvictionSoftGracePeriod struct {
+ // MemoryAvailable is the grace period for the MemoryAvailable eviction threshold.
+ // +optional
+ MemoryAvailable *metav1.Duration `json:"memoryAvailable,omitempty" protobuf:"bytes,1,opt,name=memoryAvailable"`
+ // ImageFSAvailable is the grace period for the ImageFSAvailable eviction threshold.
+ // +optional
+ ImageFSAvailable *metav1.Duration `json:"imageFSAvailable,omitempty" protobuf:"bytes,2,opt,name=imageFSAvailable"`
+ // ImageFSInodesFree is the grace period for the ImageFSInodesFree eviction threshold.
+ // +optional
+ ImageFSInodesFree *metav1.Duration `json:"imageFSInodesFree,omitempty" protobuf:"bytes,3,opt,name=imageFSInodesFree"`
+ // NodeFSAvailable is the grace period for the NodeFSAvailable eviction threshold.
+ // +optional
+ NodeFSAvailable *metav1.Duration `json:"nodeFSAvailable,omitempty" protobuf:"bytes,4,opt,name=nodeFSAvailable"`
+ // NodeFSInodesFree is the grace period for the NodeFSInodesFree eviction threshold.
+ // +optional
+ NodeFSInodesFree *metav1.Duration `json:"nodeFSInodesFree,omitempty" protobuf:"bytes,5,opt,name=nodeFSInodesFree"`
+}
+
+// KubeletConfigReserved contains reserved resources for daemons
+type KubeletConfigReserved struct {
+ // CPU is the reserved cpu.
+ // +optional
+ CPU *resource.Quantity `json:"cpu,omitempty" protobuf:"bytes,1,opt,name=cpu"`
+ // Memory is the reserved memory.
+ // +optional
+ Memory *resource.Quantity `json:"memory,omitempty" protobuf:"bytes,2,opt,name=memory"`
+ // EphemeralStorage is the reserved ephemeral-storage.
+ // +optional
+ EphemeralStorage *resource.Quantity `json:"ephemeralStorage,omitempty" protobuf:"bytes,3,opt,name=ephemeralStorage"`
+ // PID is the reserved number of process IDs.
+ // To reserve PID, the SupportNodePidsLimit feature gate must be enabled in Kubernetes versions < 1.15.
+ // +optional
+ PID *resource.Quantity `json:"pid,omitempty" protobuf:"bytes,4,opt,name=pid"`
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Networking relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Networking defines networking parameters for the shoot cluster.
+type Networking struct {
+ // Type identifies the type of the networking plugin.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // ProviderConfig is the configuration passed to network resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // Pods is the CIDR of the pod network.
+ // +optional
+ Pods *string `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"`
+ // Nodes is the CIDR of the entire node network.
+ // +optional
+ Nodes *string `json:"nodes,omitempty" protobuf:"bytes,4,opt,name=nodes"`
+ // Services is the CIDR of the service network.
+ // +optional
+ Services *string `json:"services,omitempty" protobuf:"bytes,5,opt,name=services"`
+}
+
+const (
+ // DefaultPodNetworkCIDR is a constant for the default pod network CIDR of a Shoot cluster.
+ DefaultPodNetworkCIDR = "100.96.0.0/11"
+ // DefaultServiceNetworkCIDR is a constant for the default service network CIDR of a Shoot cluster.
+ DefaultServiceNetworkCIDR = "100.64.0.0/13"
+)
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Maintenance relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+const (
+ // MaintenanceTimeWindowDurationMinimum is the minimum duration for a maintenance time window.
+ MaintenanceTimeWindowDurationMinimum = 30 * time.Minute
+ // MaintenanceTimeWindowDurationMaximum is the maximum duration for a maintenance time window.
+ MaintenanceTimeWindowDurationMaximum = 6 * time.Hour
+)
+
+// Maintenance contains information about the time window for maintenance operations and which
+// operations should be performed.
+type Maintenance struct {
+ // AutoUpdate contains information about which constraints should be automatically updated.
+ // +optional
+ AutoUpdate *MaintenanceAutoUpdate `json:"autoUpdate,omitempty" protobuf:"bytes,1,opt,name=autoUpdate"`
+ // TimeWindow contains information about the time window for maintenance operations.
+ // +optional
+ TimeWindow *MaintenanceTimeWindow `json:"timeWindow,omitempty" protobuf:"bytes,2,opt,name=timeWindow"`
+ // ConfineSpecUpdateRollout prevents changes/updates to the shoot specification from being rolled out immediately.
+ // Instead, they are rolled out during the shoot's maintenance time window. The one exception that triggers an
+ // immediate rollout is a change to the Spec.Hibernation.Enabled field.
+ // +optional
+ ConfineSpecUpdateRollout *bool `json:"confineSpecUpdateRollout,omitempty" protobuf:"varint,3,opt,name=confineSpecUpdateRollout"`
+}
+
+// MaintenanceAutoUpdate contains information about which constraints should be automatically updated.
+type MaintenanceAutoUpdate struct {
+ // KubernetesVersion indicates whether the patch Kubernetes version may be automatically updated (default: true).
+ KubernetesVersion bool `json:"kubernetesVersion" protobuf:"varint,1,opt,name=kubernetesVersion"`
+ // MachineImageVersion indicates whether the machine image version may be automatically updated (default: true).
+ MachineImageVersion bool `json:"machineImageVersion" protobuf:"varint,2,opt,name=machineImageVersion"`
+}
+
+// MaintenanceTimeWindow contains information about the time window for maintenance operations.
+type MaintenanceTimeWindow struct {
+ // Begin is the beginning of the time window in the format HHMMSS+ZONE, e.g. "220000+0100".
+ // If not present, a random value will be computed.
+ Begin string `json:"begin" protobuf:"bytes,1,opt,name=begin"`
+ // End is the end of the time window in the format HHMMSS+ZONE, e.g. "220000+0100".
+ // If not present, the value will be computed based on the "Begin" value.
+ End string `json:"end" protobuf:"bytes,2,opt,name=end"`
+}
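+
+// An illustrative sketch of a MaintenanceTimeWindow in the documented HHMMSS+ZONE format
+// (values are assumptions): a nightly window from 22:00 to 23:00 in the UTC+01:00 zone,
+// which respects the minimum and maximum window durations defined above.
+var exampleMaintenanceTimeWindow = MaintenanceTimeWindow{
+ Begin: "220000+0100",
+ End:   "230000+0100",
+}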
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Monitoring relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Monitoring contains information about the monitoring configuration for the shoot.
+type Monitoring struct {
+ // Alerting contains information about the alerting configuration for the shoot cluster.
+ // +optional
+ Alerting *Alerting `json:"alerting,omitempty" protobuf:"bytes,1,opt,name=alerting"`
+}
+
+// Alerting contains information about how alerting will be done (i.e. who will receive alerts and how).
+type Alerting struct {
+ // EmailReceivers is a list of recipients for alerts.
+ // +optional
+ EmailReceivers []string `json:"emailReceivers,omitempty" protobuf:"bytes,1,rep,name=emailReceivers"`
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Provider relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Provider contains provider-specific information that is handed over to the provider-specific
+// extension controller.
+type Provider struct {
+ // Type is the type of the provider.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // ControlPlaneConfig contains the provider-specific control plane config blob. Please look up the concrete
+ // definition in the documentation of your provider extension.
+ // +optional
+ ControlPlaneConfig *runtime.RawExtension `json:"controlPlaneConfig,omitempty" protobuf:"bytes,2,opt,name=controlPlaneConfig"`
+ // InfrastructureConfig contains the provider-specific infrastructure config blob. Please look up the concrete
+ // definition in the documentation of your provider extension.
+ // +optional
+ InfrastructureConfig *runtime.RawExtension `json:"infrastructureConfig,omitempty" protobuf:"bytes,3,opt,name=infrastructureConfig"`
+ // Workers is a list of worker groups.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Workers []Worker `json:"workers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,4,rep,name=workers"`
+}
+
+// Worker is the base definition of a worker group.
+type Worker struct {
+ // Annotations is a map of key/value pairs for annotations for all the `Node` objects in this worker pool.
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,1,rep,name=annotations"`
+ // CABundle is a certificate bundle which will be installed onto every machine of this worker pool.
+ // +optional
+ CABundle *string `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"`
+ // CRI contains configurations of CRI support for every machine in the worker pool.
+ // +optional
+ CRI *CRI `json:"cri,omitempty" protobuf:"bytes,3,opt,name=cri"`
+ // Kubernetes contains configuration for Kubernetes components related to this worker pool.
+ // +optional
+ Kubernetes *WorkerKubernetes `json:"kubernetes,omitempty" protobuf:"bytes,4,opt,name=kubernetes"`
+ // Labels is a map of key/value pairs for labels for all the `Node` objects in this worker pool.
+ // +optional
+ Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,5,rep,name=labels"`
+ // Name is the name of the worker group.
+ Name string `json:"name" protobuf:"bytes,6,opt,name=name"`
+ // Machine contains information about the machine type and image.
+ Machine Machine `json:"machine" protobuf:"bytes,7,opt,name=machine"`
+ // Maximum is the maximum number of VMs to create.
+ Maximum int32 `json:"maximum" protobuf:"varint,8,opt,name=maximum"`
+ // Minimum is the minimum number of VMs to create.
+ Minimum int32 `json:"minimum" protobuf:"varint,9,opt,name=minimum"`
+ // MaxSurge is the maximum number of VMs that are created during an update.
+ // +optional
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,10,opt,name=maxSurge"`
+ // MaxUnavailable is the maximum number of VMs that can be unavailable during an update.
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,11,opt,name=maxUnavailable"`
+ // ProviderConfig is the provider-specific configuration for this worker pool.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,12,opt,name=providerConfig"`
+ // Taints is a list of taints for all the `Node` objects in this worker pool.
+ // +optional
+ Taints []corev1.Taint `json:"taints,omitempty" protobuf:"bytes,13,rep,name=taints"`
+ // Volume contains information about the volume type and size.
+ // +optional
+ Volume *Volume `json:"volume,omitempty" protobuf:"bytes,14,opt,name=volume"`
+ // DataVolumes contains a list of additional worker volumes.
+ // +optional
+ DataVolumes []DataVolume `json:"dataVolumes,omitempty" protobuf:"bytes,15,rep,name=dataVolumes"`
+ // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state.
+ // +optional
+ KubeletDataVolumeName *string `json:"kubeletDataVolumeName,omitempty" protobuf:"bytes,16,opt,name=kubeletDataVolumeName"`
+ // Zones is a list of availability zones that are used to evenly distribute this worker pool. Optional
+ // as not every provider may support availability zones.
+ // +optional
+ Zones []string `json:"zones,omitempty" protobuf:"bytes,17,rep,name=zones"`
+ // SystemComponents contains configuration for system components related to this worker pool
+ // +optional
+ SystemComponents *WorkerSystemComponents `json:"systemComponents,omitempty" protobuf:"bytes,18,opt,name=systemComponents"`
+ // MachineControllerManagerSettings contains configurations for different worker-pools, e.g. MachineDrainTimeout, MachineHealthTimeout.
+ // +optional
+ MachineControllerManagerSettings *MachineControllerManagerSettings `json:"machineControllerManager,omitempty" protobuf:"bytes,19,opt,name=machineControllerManager"`
+}
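+
+// An illustrative sketch of a Worker pool (machine type, image, and zones are assumptions for
+// demonstration, not recommendations): a pool that scales between three and five VMs and rolls
+// out updates by surging at most one additional VM at a time.
+var (
+ exampleMachineImageVersion = "318.9.0"
+ exampleWorkerMaxSurge      = intstr.FromInt(1)
+
+ exampleWorker = Worker{
+  Name: "worker-pool-1",
+  Machine: Machine{
+   Type: "m5.large",
+   Image: &ShootMachineImage{
+    Name:    "gardenlinux",
+    Version: &exampleMachineImageVersion,
+   },
+  },
+  Minimum:  3,
+  Maximum:  5,
+  MaxSurge: &exampleWorkerMaxSurge,
+  Zones:    []string{"eu-central-1a", "eu-central-1b"},
+ }
+)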
+
+// MachineControllerManagerSettings contains configurations for different worker-pools, e.g. MachineDrainTimeout, MachineHealthTimeout.
+type MachineControllerManagerSettings struct {
+ // MachineDrainTimeout is the period after which a machine is forcefully deleted.
+ // +optional
+ MachineDrainTimeout *metav1.Duration `json:"machineDrainTimeout,omitempty" protobuf:"bytes,1,name=machineDrainTimeout"`
+ // MachineHealthTimeout is the period after which a machine is declared failed.
+ // +optional
+ MachineHealthTimeout *metav1.Duration `json:"machineHealthTimeout,omitempty" protobuf:"bytes,2,name=machineHealthTimeout"`
+ // MachineCreationTimeout is the period after which the creation of a machine is declared failed.
+ // +optional
+ MachineCreationTimeout *metav1.Duration `json:"machineCreationTimeout,omitempty" protobuf:"bytes,3,name=machineCreationTimeout"`
+ // MaxEvictRetries is the number of eviction retries on a pod after which drain is declared failed, and forceful deletion is triggered.
+ // +optional
+ MaxEvictRetries *int32 `json:"maxEvictRetries,omitempty" protobuf:"bytes,4,name=maxEvictRetries"`
+ // NodeConditions is the set of node conditions; if any of them stays true for the period of MachineHealthTimeout, the machine will be declared failed.
+ // +optional
+ NodeConditions []string `json:"nodeConditions,omitempty" protobuf:"bytes,5,name=nodeConditions"`
+}
+
+// WorkerSystemComponents contains configuration for system components related to this worker pool
+type WorkerSystemComponents struct {
+ // Allow determines whether the pool should be allowed to host system components or not (defaults to true)
+ Allow bool `json:"allow" protobuf:"bytes,1,name=allow"`
+}
+
+// WorkerKubernetes contains configuration for Kubernetes components related to this worker pool.
+type WorkerKubernetes struct {
+ // Kubelet contains configuration settings for all kubelets of this worker pool.
+ // +optional
+ Kubelet *KubeletConfig `json:"kubelet,omitempty" protobuf:"bytes,1,opt,name=kubelet"`
+}
+
+// Machine contains information about the machine type and image.
+type Machine struct {
+ // Type is the machine type of the worker group.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // Image holds information about the machine image to use for all nodes of this pool. It will default to the
+ // latest version of the first image stated in the referenced CloudProfile if no value has been provided.
+ // +optional
+ Image *ShootMachineImage `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
+}
+
+// ShootMachineImage defines the name and the version of the shoot's machine image in any environment. Has to be
+// defined in the respective CloudProfile.
+type ShootMachineImage struct {
+ // Name is the name of the image.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // ProviderConfig is the shoot's individual configuration passed to an extension resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+ // Version is the version of the shoot's image.
+ // If version is not provided, it will be defaulted to the latest version from the CloudProfile.
+ // +optional
+ Version *string `json:"version,omitempty" protobuf:"bytes,3,opt,name=version"`
+}
+
+// Volume contains information about the volume type, size, and encryption.
+type Volume struct {
+ // Name of the volume to make it referenceable.
+ // +optional
+ Name *string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+ // Type is the type of the volume.
+ // +optional
+ Type *string `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
+ // VolumeSize is the size of the volume.
+ VolumeSize string `json:"size" protobuf:"bytes,3,opt,name=size"`
+ // Encrypted determines if the volume should be encrypted.
+ // +optional
+ Encrypted *bool `json:"encrypted,omitempty" protobuf:"varint,4,opt,name=encrypted"`
+}
+
+// DataVolume contains information about a data volume.
+type DataVolume struct {
+ // Name of the volume to make it referenceable.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Type is the type of the volume.
+ // +optional
+ Type *string `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
+ // VolumeSize is the size of the volume.
+ VolumeSize string `json:"size" protobuf:"bytes,3,opt,name=size"`
+ // Encrypted determines if the volume should be encrypted.
+ // +optional
+ Encrypted *bool `json:"encrypted,omitempty" protobuf:"varint,4,opt,name=encrypted"`
+}
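
Another small sketch under the same assumptions (editorial illustration, not part of the vendored file): a root Volume plus an additional DataVolume for a worker pool. The volume type "gp2" is provider-specific and only an example; sizes use Kubernetes resource.Quantity notation such as "50Gi".

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
)

func strPtr(s string) *string { return &s }
func boolPtr(b bool) *bool    { return &b }

func main() {
	// Root volume of each node in the pool.
	root := gardencorev1beta1.Volume{
		Type:       strPtr("gp2"), // provider-specific volume type (assumption)
		VolumeSize: "50Gi",
	}
	// Additional, separately attached data volume, referenced by name.
	data := gardencorev1beta1.DataVolume{
		Name:       "etcd-backup",
		Type:       strPtr("gp2"),
		VolumeSize: "20Gi",
		Encrypted:  boolPtr(true),
	}
	fmt.Printf("root=%+v data=%+v\n", root, data)
}
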
+
+// CRI contains information about the Container Runtimes.
+type CRI struct {
+ // Name is the name of the CRI library.
+ Name CRIName `json:"name" protobuf:"bytes,1,opt,name=name,casttype=CRIName"`
+ // ContainerRuntimes is the list of the required container runtimes supported for a worker pool.
+ // +optional
+ ContainerRuntimes []ContainerRuntime `json:"containerRuntimes,omitempty" protobuf:"bytes,2,rep,name=containerRuntimes"`
+}
+
+// CRIName is a string type for the name of the container runtime interface.
+type CRIName string
+
+const (
+ // CRINameContainerD is a constant for the containerd container runtime interface.
+ CRINameContainerD CRIName = "containerd"
+)
+
+// ContainerRuntime contains information about a worker's available container runtime.
+type ContainerRuntime struct {
+ // Type is the type of the Container Runtime.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+
+ // ProviderConfig is the configuration passed to the container runtime resource.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"`
+}
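
A short sketch (editorial illustration, not part of the vendored file) of the CRI and ContainerRuntime types above: containerd as the pool's CRI with one additional container runtime. The runtime type "gvisor" is only an example value.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
)

func main() {
	cri := gardencorev1beta1.CRI{
		Name: gardencorev1beta1.CRINameContainerD,
		ContainerRuntimes: []gardencorev1beta1.ContainerRuntime{
			{Type: "gvisor"}, // example runtime type (assumption)
		},
	}
	fmt.Printf("%+v\n", cri)
}
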
+
+var (
+ // DefaultWorkerMaxSurge is the default value for Worker MaxSurge.
+ DefaultWorkerMaxSurge = intstr.FromInt(1)
+ // DefaultWorkerMaxUnavailable is the default value for Worker MaxUnavailable.
+ DefaultWorkerMaxUnavailable = intstr.FromInt(0)
+ // DefaultWorkerSystemComponentsAllow is the default value for the Allow field of WorkerSystemComponents.
+ DefaultWorkerSystemComponentsAllow = true
+)
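
The defaults above are plain package-level values; here is a minimal sketch (editorial illustration, not part of the vendored file) of how such an intstr default can be applied to an unset optional field. defaultIntOrString is a hypothetical helper, not a Gardener function.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// defaultIntOrString returns *v if it is set, otherwise the given default.
func defaultIntOrString(v *intstr.IntOrString, def intstr.IntOrString) intstr.IntOrString {
	if v != nil {
		return *v
	}
	return def
}

func main() {
	var maxSurge *intstr.IntOrString // unset in the manifest
	effective := defaultIntOrString(maxSurge, gardencorev1beta1.DefaultWorkerMaxSurge)
	fmt.Println(effective.String()) // prints "1"
}
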
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Other/miscellaneous constants and types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+const (
+ // ShootEventImageVersionMaintenance indicates that a maintenance operation regarding the image version has been performed.
+ ShootEventImageVersionMaintenance = "MachineImageVersionMaintenance"
+ // ShootEventK8sVersionMaintenance indicates that a maintenance operation regarding the K8s version has been performed.
+ ShootEventK8sVersionMaintenance = "KubernetesVersionMaintenance"
+ // ShootEventHibernationEnabled indicates that hibernation started.
+ ShootEventHibernationEnabled = "Hibernated"
+ // ShootEventHibernationDisabled indicates that hibernation ended.
+ ShootEventHibernationDisabled = "WokenUp"
+ // ShootEventSchedulingSuccessful indicates that a scheduling decision was taken successfully.
+ ShootEventSchedulingSuccessful = "SchedulingSuccessful"
+ // ShootEventSchedulingFailed indicates that a scheduling decision failed.
+ ShootEventSchedulingFailed = "SchedulingFailed"
+)
+
+const (
+ // ShootAPIServerAvailable is a constant for a condition type indicating that the Shoot cluster's API server is available.
+ ShootAPIServerAvailable ConditionType = "APIServerAvailable"
+ // ShootControlPlaneHealthy is a constant for a condition type indicating the control plane health.
+ ShootControlPlaneHealthy ConditionType = "ControlPlaneHealthy"
+ // ShootEveryNodeReady is a constant for a condition type indicating the node health.
+ ShootEveryNodeReady ConditionType = "EveryNodeReady"
+ // ShootSystemComponentsHealthy is a constant for a condition type indicating the system components health.
+ ShootSystemComponentsHealthy ConditionType = "SystemComponentsHealthy"
+ // ShootHibernationPossible is a constant for a condition type indicating whether the Shoot can be hibernated.
+ ShootHibernationPossible ConditionType = "HibernationPossible"
+ // ShootMaintenancePreconditionsSatisfied is a constant for a condition type indicating whether all preconditions
+ // for a shoot maintenance operation are satisfied.
+ ShootMaintenancePreconditionsSatisfied ConditionType = "MaintenancePreconditionsSatisfied"
+)
+
+// ShootPurpose is a string type for the purpose of a shoot cluster.
+type ShootPurpose string
+
+const (
+ // ShootPurposeEvaluation is a constant for the evaluation purpose.
+ ShootPurposeEvaluation ShootPurpose = "evaluation"
+ // ShootPurposeTesting is a constant for the testing purpose.
+ ShootPurposeTesting ShootPurpose = "testing"
+ // ShootPurposeDevelopment is a constant for the development purpose.
+ ShootPurposeDevelopment ShootPurpose = "development"
+ // ShootPurposeProduction is a constant for the production purpose.
+ ShootPurposeProduction ShootPurpose = "production"
+ // ShootPurposeInfrastructure is a constant for the infrastructure purpose.
+ ShootPurposeInfrastructure ShootPurpose = "infrastructure"
+)
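
A brief sketch (editorial illustration, not part of the vendored file) switching on the ShootPurpose constants above; the alerting policy shown is made up for the example and is not Gardener behaviour.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
)

// wantsAlerts is a hypothetical helper that treats production and
// infrastructure shoots as alert-worthy.
func wantsAlerts(p gardencorev1beta1.ShootPurpose) bool {
	switch p {
	case gardencorev1beta1.ShootPurposeProduction, gardencorev1beta1.ShootPurposeInfrastructure:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(wantsAlerts(gardencorev1beta1.ShootPurposeEvaluation)) // false
	fmt.Println(wantsAlerts(gardencorev1beta1.ShootPurposeProduction)) // true
}
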
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_utils.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_utils.go
new file mode 100644
index 0000000..63dc3d5
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_utils.go
@@ -0,0 +1,72 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ // EventSchedulingSuccessful is an event reason for successful scheduling.
+ EventSchedulingSuccessful = "SchedulingSuccessful"
+ // EventSchedulingFailed is an event reason for failed scheduling.
+ EventSchedulingFailed = "SchedulingFailed"
+)
+
+// ConditionStatus is the status of a condition.
+type ConditionStatus string
+
+// ConditionType is a string type denoting the type of a condition.
+type ConditionType string
+
+// Condition holds the information about the state of a resource.
+type Condition struct {
+ // Type of the Shoot condition.
+ Type ConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // Last time the condition was updated.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime" protobuf:"bytes,4,opt,name=lastUpdateTime"`
+ // The reason for the condition's last transition.
+ Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ Message string `json:"message" protobuf:"bytes,6,opt,name=message"`
+ // Well-defined error codes in case the condition reports a problem.
+ // +optional
+ Codes []ErrorCode `json:"codes,omitempty" protobuf:"bytes,7,rep,name=codes,casttype=ErrorCode"`
+}
+
+const (
+ // ConditionTrue means a resource is in the condition.
+ ConditionTrue ConditionStatus = "True"
+ // ConditionFalse means a resource is not in the condition.
+ ConditionFalse ConditionStatus = "False"
+ // ConditionUnknown means Gardener can't decide if a resource is in the condition or not.
+ ConditionUnknown ConditionStatus = "Unknown"
+ // ConditionProgressing means the condition was previously true and has since failed, but is still within a predefined failure threshold.
+ // In the future, we could add other intermediate conditions, e.g. ConditionDegraded.
+ ConditionProgressing ConditionStatus = "Progressing"
+
+ // ConditionCheckError is a constant for a reason in a condition.
+ ConditionCheckError = "ConditionCheckError"
+ // ManagedResourceMissingConditionError is a constant for a reason in a condition that indicates
+ // one or multiple missing conditions in the observed managed resource.
+ ManagedResourceMissingConditionError = "MissingManagedResourceCondition"
+ // OutdatedStatusError is a constant for a reason in a condition that indicates
+ // that the observed generation in a status is outdated.
+ OutdatedStatusError = "OutdatedStatus"
+)
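
A minimal sketch (editorial illustration, not part of the vendored file): building a Condition and checking a slice of conditions against required types using the constants above. allTrue is a hypothetical helper; the reason and message strings are example values.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// allTrue reports whether every required condition type is present with status True.
func allTrue(conditions []gardencorev1beta1.Condition, required ...gardencorev1beta1.ConditionType) bool {
	status := map[gardencorev1beta1.ConditionType]gardencorev1beta1.ConditionStatus{}
	for _, c := range conditions {
		status[c.Type] = c.Status
	}
	for _, t := range required {
		if status[t] != gardencorev1beta1.ConditionTrue {
			return false
		}
	}
	return true
}

func main() {
	conditions := []gardencorev1beta1.Condition{{
		Type:               gardencorev1beta1.ShootAPIServerAvailable,
		Status:             gardencorev1beta1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
		LastUpdateTime:     metav1.Now(),
		Reason:             "ProbeSucceeded",          // example reason
		Message:            "API server is reachable.", // example message
	}}
	// EveryNodeReady is not reported at all, so the overall check fails.
	fmt.Println(allTrue(conditions, gardencorev1beta1.ShootAPIServerAvailable, gardencorev1beta1.ShootEveryNodeReady)) // false
}
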
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go
new file mode 100644
index 0000000..b987e3b
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,4874 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ unsafe "unsafe"
+
+ core "github.com/gardener/gardener/pkg/apis/core"
+ v1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ resource "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ types "k8s.io/apimachinery/pkg/types"
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
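
The generated RegisterConversions below only wires the conversion functions into a scheme; a minimal sketch (editorial illustration, not part of the generated file) of how they are typically consumed, assuming the usual AddToScheme helpers exist for both the internal core package and this v1beta1 package.

package main

import (
	core "github.com/gardener/gardener/pkg/apis/core"
	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	utilruntime.Must(core.AddToScheme(scheme))              // internal API types (assumed helper)
	utilruntime.Must(gardencorev1beta1.AddToScheme(scheme)) // v1beta1 types plus the generated conversions

	external := &gardencorev1beta1.Shoot{}
	internal := &core.Shoot{}
	// Convert the versioned object into its internal representation using the
	// conversion functions registered by RegisterConversions.
	utilruntime.Must(scheme.Convert(external, internal, nil))
}
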
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*Addon)(nil), (*core.Addon)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Addon_To_core_Addon(a.(*Addon), b.(*core.Addon), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Addon)(nil), (*Addon)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Addon_To_v1beta1_Addon(a.(*core.Addon), b.(*Addon), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Addons)(nil), (*core.Addons)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Addons_To_core_Addons(a.(*Addons), b.(*core.Addons), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Addons)(nil), (*Addons)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Addons_To_v1beta1_Addons(a.(*core.Addons), b.(*Addons), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AdmissionPlugin)(nil), (*core.AdmissionPlugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AdmissionPlugin_To_core_AdmissionPlugin(a.(*AdmissionPlugin), b.(*core.AdmissionPlugin), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.AdmissionPlugin)(nil), (*AdmissionPlugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_AdmissionPlugin_To_v1beta1_AdmissionPlugin(a.(*core.AdmissionPlugin), b.(*AdmissionPlugin), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Alerting)(nil), (*core.Alerting)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Alerting_To_core_Alerting(a.(*Alerting), b.(*core.Alerting), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Alerting)(nil), (*Alerting)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Alerting_To_v1beta1_Alerting(a.(*core.Alerting), b.(*Alerting), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AuditConfig)(nil), (*core.AuditConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AuditConfig_To_core_AuditConfig(a.(*AuditConfig), b.(*core.AuditConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.AuditConfig)(nil), (*AuditConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_AuditConfig_To_v1beta1_AuditConfig(a.(*core.AuditConfig), b.(*AuditConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AuditPolicy)(nil), (*core.AuditPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AuditPolicy_To_core_AuditPolicy(a.(*AuditPolicy), b.(*core.AuditPolicy), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.AuditPolicy)(nil), (*AuditPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_AuditPolicy_To_v1beta1_AuditPolicy(a.(*core.AuditPolicy), b.(*AuditPolicy), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AvailabilityZone)(nil), (*core.AvailabilityZone)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AvailabilityZone_To_core_AvailabilityZone(a.(*AvailabilityZone), b.(*core.AvailabilityZone), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.AvailabilityZone)(nil), (*AvailabilityZone)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_AvailabilityZone_To_v1beta1_AvailabilityZone(a.(*core.AvailabilityZone), b.(*AvailabilityZone), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupBucket)(nil), (*core.BackupBucket)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_BackupBucket_To_core_BackupBucket(a.(*BackupBucket), b.(*core.BackupBucket), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupBucket)(nil), (*BackupBucket)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupBucket_To_v1beta1_BackupBucket(a.(*core.BackupBucket), b.(*BackupBucket), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupBucketList)(nil), (*core.BackupBucketList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_BackupBucketList_To_core_BackupBucketList(a.(*BackupBucketList), b.(*core.BackupBucketList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupBucketList)(nil), (*BackupBucketList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupBucketList_To_v1beta1_BackupBucketList(a.(*core.BackupBucketList), b.(*BackupBucketList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupBucketProvider)(nil), (*core.BackupBucketProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider(a.(*BackupBucketProvider), b.(*core.BackupBucketProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupBucketProvider)(nil), (*BackupBucketProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider(a.(*core.BackupBucketProvider), b.(*BackupBucketProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupBucketSpec)(nil), (*core.BackupBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec(a.(*BackupBucketSpec), b.(*core.BackupBucketSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupBucketSpec)(nil), (*BackupBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec(a.(*core.BackupBucketSpec), b.(*BackupBucketSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupBucketStatus)(nil), (*core.BackupBucketStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus(a.(*BackupBucketStatus), b.(*core.BackupBucketStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupBucketStatus)(nil), (*BackupBucketStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus(a.(*core.BackupBucketStatus), b.(*BackupBucketStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupEntry)(nil), (*core.BackupEntry)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_BackupEntry_To_core_BackupEntry(a.(*BackupEntry), b.(*core.BackupEntry), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupEntry)(nil), (*BackupEntry)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupEntry_To_v1beta1_BackupEntry(a.(*core.BackupEntry), b.(*BackupEntry), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupEntryList)(nil), (*core.BackupEntryList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_BackupEntryList_To_core_BackupEntryList(a.(*BackupEntryList), b.(*core.BackupEntryList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupEntryList)(nil), (*BackupEntryList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupEntryList_To_v1beta1_BackupEntryList(a.(*core.BackupEntryList), b.(*BackupEntryList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupEntrySpec)(nil), (*core.BackupEntrySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_BackupEntrySpec_To_core_BackupEntrySpec(a.(*BackupEntrySpec), b.(*core.BackupEntrySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupEntrySpec)(nil), (*BackupEntrySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupEntrySpec_To_v1beta1_BackupEntrySpec(a.(*core.BackupEntrySpec), b.(*BackupEntrySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BackupEntryStatus)(nil), (*core.BackupEntryStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_BackupEntryStatus_To_core_BackupEntryStatus(a.(*BackupEntryStatus), b.(*core.BackupEntryStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.BackupEntryStatus)(nil), (*BackupEntryStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_BackupEntryStatus_To_v1beta1_BackupEntryStatus(a.(*core.BackupEntryStatus), b.(*BackupEntryStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CRI)(nil), (*core.CRI)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_CRI_To_core_CRI(a.(*CRI), b.(*core.CRI), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.CRI)(nil), (*CRI)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_CRI_To_v1beta1_CRI(a.(*core.CRI), b.(*CRI), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CloudInfo)(nil), (*core.CloudInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_CloudInfo_To_core_CloudInfo(a.(*CloudInfo), b.(*core.CloudInfo), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.CloudInfo)(nil), (*CloudInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_CloudInfo_To_v1beta1_CloudInfo(a.(*core.CloudInfo), b.(*CloudInfo), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CloudProfile)(nil), (*core.CloudProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_CloudProfile_To_core_CloudProfile(a.(*CloudProfile), b.(*core.CloudProfile), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.CloudProfile)(nil), (*CloudProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_CloudProfile_To_v1beta1_CloudProfile(a.(*core.CloudProfile), b.(*CloudProfile), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CloudProfileList)(nil), (*core.CloudProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_CloudProfileList_To_core_CloudProfileList(a.(*CloudProfileList), b.(*core.CloudProfileList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.CloudProfileList)(nil), (*CloudProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_CloudProfileList_To_v1beta1_CloudProfileList(a.(*core.CloudProfileList), b.(*CloudProfileList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CloudProfileSpec)(nil), (*core.CloudProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec(a.(*CloudProfileSpec), b.(*core.CloudProfileSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.CloudProfileSpec)(nil), (*CloudProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec(a.(*core.CloudProfileSpec), b.(*CloudProfileSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ClusterAutoscaler)(nil), (*core.ClusterAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ClusterAutoscaler_To_core_ClusterAutoscaler(a.(*ClusterAutoscaler), b.(*core.ClusterAutoscaler), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ClusterAutoscaler)(nil), (*ClusterAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler(a.(*core.ClusterAutoscaler), b.(*ClusterAutoscaler), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ClusterInfo)(nil), (*core.ClusterInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ClusterInfo_To_core_ClusterInfo(a.(*ClusterInfo), b.(*core.ClusterInfo), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ClusterInfo)(nil), (*ClusterInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ClusterInfo_To_v1beta1_ClusterInfo(a.(*core.ClusterInfo), b.(*ClusterInfo), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Condition)(nil), (*core.Condition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Condition_To_core_Condition(a.(*Condition), b.(*core.Condition), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Condition)(nil), (*Condition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Condition_To_v1beta1_Condition(a.(*core.Condition), b.(*Condition), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ContainerRuntime)(nil), (*core.ContainerRuntime)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ContainerRuntime_To_core_ContainerRuntime(a.(*ContainerRuntime), b.(*core.ContainerRuntime), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ContainerRuntime)(nil), (*ContainerRuntime)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ContainerRuntime_To_v1beta1_ContainerRuntime(a.(*core.ContainerRuntime), b.(*ContainerRuntime), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerDeployment)(nil), (*core.ControllerDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ControllerDeployment_To_core_ControllerDeployment(a.(*ControllerDeployment), b.(*core.ControllerDeployment), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerDeployment)(nil), (*ControllerDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerDeployment_To_v1beta1_ControllerDeployment(a.(*core.ControllerDeployment), b.(*ControllerDeployment), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerInstallation)(nil), (*core.ControllerInstallation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ControllerInstallation_To_core_ControllerInstallation(a.(*ControllerInstallation), b.(*core.ControllerInstallation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerInstallation)(nil), (*ControllerInstallation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerInstallation_To_v1beta1_ControllerInstallation(a.(*core.ControllerInstallation), b.(*ControllerInstallation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerInstallationList)(nil), (*core.ControllerInstallationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ControllerInstallationList_To_core_ControllerInstallationList(a.(*ControllerInstallationList), b.(*core.ControllerInstallationList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerInstallationList)(nil), (*ControllerInstallationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerInstallationList_To_v1beta1_ControllerInstallationList(a.(*core.ControllerInstallationList), b.(*ControllerInstallationList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerInstallationSpec)(nil), (*core.ControllerInstallationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(a.(*ControllerInstallationSpec), b.(*core.ControllerInstallationSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerInstallationSpec)(nil), (*ControllerInstallationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSpec(a.(*core.ControllerInstallationSpec), b.(*ControllerInstallationSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerInstallationStatus)(nil), (*core.ControllerInstallationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(a.(*ControllerInstallationStatus), b.(*core.ControllerInstallationStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerInstallationStatus)(nil), (*ControllerInstallationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus(a.(*core.ControllerInstallationStatus), b.(*ControllerInstallationStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerRegistration)(nil), (*core.ControllerRegistration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ControllerRegistration_To_core_ControllerRegistration(a.(*ControllerRegistration), b.(*core.ControllerRegistration), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerRegistration)(nil), (*ControllerRegistration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerRegistration_To_v1beta1_ControllerRegistration(a.(*core.ControllerRegistration), b.(*ControllerRegistration), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerRegistrationList)(nil), (*core.ControllerRegistrationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ControllerRegistrationList_To_core_ControllerRegistrationList(a.(*ControllerRegistrationList), b.(*core.ControllerRegistrationList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerRegistrationList)(nil), (*ControllerRegistrationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerRegistrationList_To_v1beta1_ControllerRegistrationList(a.(*core.ControllerRegistrationList), b.(*ControllerRegistrationList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerRegistrationSpec)(nil), (*core.ControllerRegistrationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(a.(*ControllerRegistrationSpec), b.(*core.ControllerRegistrationSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerRegistrationSpec)(nil), (*ControllerRegistrationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerRegistrationSpec_To_v1beta1_ControllerRegistrationSpec(a.(*core.ControllerRegistrationSpec), b.(*ControllerRegistrationSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControllerResource)(nil), (*core.ControllerResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ControllerResource_To_core_ControllerResource(a.(*ControllerResource), b.(*core.ControllerResource), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ControllerResource)(nil), (*ControllerResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ControllerResource_To_v1beta1_ControllerResource(a.(*core.ControllerResource), b.(*ControllerResource), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*DNS)(nil), (*core.DNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_DNS_To_core_DNS(a.(*DNS), b.(*core.DNS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.DNS)(nil), (*DNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_DNS_To_v1beta1_DNS(a.(*core.DNS), b.(*DNS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*DNSIncludeExclude)(nil), (*core.DNSIncludeExclude)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_DNSIncludeExclude_To_core_DNSIncludeExclude(a.(*DNSIncludeExclude), b.(*core.DNSIncludeExclude), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.DNSIncludeExclude)(nil), (*DNSIncludeExclude)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_DNSIncludeExclude_To_v1beta1_DNSIncludeExclude(a.(*core.DNSIncludeExclude), b.(*DNSIncludeExclude), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*DNSProvider)(nil), (*core.DNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_DNSProvider_To_core_DNSProvider(a.(*DNSProvider), b.(*core.DNSProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.DNSProvider)(nil), (*DNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_DNSProvider_To_v1beta1_DNSProvider(a.(*core.DNSProvider), b.(*DNSProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*DataVolume)(nil), (*core.DataVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_DataVolume_To_core_DataVolume(a.(*DataVolume), b.(*core.DataVolume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.DataVolume)(nil), (*DataVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_DataVolume_To_v1beta1_DataVolume(a.(*core.DataVolume), b.(*DataVolume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Endpoint)(nil), (*core.Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Endpoint_To_core_Endpoint(a.(*Endpoint), b.(*core.Endpoint), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Endpoint)(nil), (*Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Endpoint_To_v1beta1_Endpoint(a.(*core.Endpoint), b.(*Endpoint), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ExpirableVersion)(nil), (*core.ExpirableVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ExpirableVersion_To_core_ExpirableVersion(a.(*ExpirableVersion), b.(*core.ExpirableVersion), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ExpirableVersion)(nil), (*ExpirableVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ExpirableVersion_To_v1beta1_ExpirableVersion(a.(*core.ExpirableVersion), b.(*ExpirableVersion), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Extension)(nil), (*core.Extension)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Extension_To_core_Extension(a.(*Extension), b.(*core.Extension), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Extension)(nil), (*Extension)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Extension_To_v1beta1_Extension(a.(*core.Extension), b.(*Extension), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Gardener)(nil), (*core.Gardener)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Gardener_To_core_Gardener(a.(*Gardener), b.(*core.Gardener), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Gardener)(nil), (*Gardener)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Gardener_To_v1beta1_Gardener(a.(*core.Gardener), b.(*Gardener), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Hibernation)(nil), (*core.Hibernation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Hibernation_To_core_Hibernation(a.(*Hibernation), b.(*core.Hibernation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Hibernation)(nil), (*Hibernation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Hibernation_To_v1beta1_Hibernation(a.(*core.Hibernation), b.(*Hibernation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*HibernationSchedule)(nil), (*core.HibernationSchedule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_HibernationSchedule_To_core_HibernationSchedule(a.(*HibernationSchedule), b.(*core.HibernationSchedule), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.HibernationSchedule)(nil), (*HibernationSchedule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_HibernationSchedule_To_v1beta1_HibernationSchedule(a.(*core.HibernationSchedule), b.(*HibernationSchedule), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*HorizontalPodAutoscalerConfig)(nil), (*core.HorizontalPodAutoscalerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(a.(*HorizontalPodAutoscalerConfig), b.(*core.HorizontalPodAutoscalerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.HorizontalPodAutoscalerConfig)(nil), (*HorizontalPodAutoscalerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_HorizontalPodAutoscalerConfig_To_v1beta1_HorizontalPodAutoscalerConfig(a.(*core.HorizontalPodAutoscalerConfig), b.(*HorizontalPodAutoscalerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Ingress)(nil), (*core.Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Ingress_To_core_Ingress(a.(*Ingress), b.(*core.Ingress), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Ingress)(nil), (*Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Ingress_To_v1beta1_Ingress(a.(*core.Ingress), b.(*Ingress), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*IngressController)(nil), (*core.IngressController)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_IngressController_To_core_IngressController(a.(*IngressController), b.(*core.IngressController), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.IngressController)(nil), (*IngressController)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_IngressController_To_v1beta1_IngressController(a.(*core.IngressController), b.(*IngressController), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeAPIServerConfig)(nil), (*core.KubeAPIServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(a.(*KubeAPIServerConfig), b.(*core.KubeAPIServerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeAPIServerConfig)(nil), (*KubeAPIServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig(a.(*core.KubeAPIServerConfig), b.(*KubeAPIServerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeAPIServerRequests)(nil), (*core.KubeAPIServerRequests)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(a.(*KubeAPIServerRequests), b.(*core.KubeAPIServerRequests), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeAPIServerRequests)(nil), (*KubeAPIServerRequests)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeAPIServerRequests_To_v1beta1_KubeAPIServerRequests(a.(*core.KubeAPIServerRequests), b.(*KubeAPIServerRequests), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeControllerManagerConfig)(nil), (*core.KubeControllerManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(a.(*KubeControllerManagerConfig), b.(*core.KubeControllerManagerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeControllerManagerConfig)(nil), (*KubeControllerManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeControllerManagerConfig_To_v1beta1_KubeControllerManagerConfig(a.(*core.KubeControllerManagerConfig), b.(*KubeControllerManagerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeProxyConfig)(nil), (*core.KubeProxyConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubeProxyConfig_To_core_KubeProxyConfig(a.(*KubeProxyConfig), b.(*core.KubeProxyConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeProxyConfig)(nil), (*KubeProxyConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeProxyConfig_To_v1beta1_KubeProxyConfig(a.(*core.KubeProxyConfig), b.(*KubeProxyConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeSchedulerConfig)(nil), (*core.KubeSchedulerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(a.(*KubeSchedulerConfig), b.(*core.KubeSchedulerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeSchedulerConfig)(nil), (*KubeSchedulerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeSchedulerConfig_To_v1beta1_KubeSchedulerConfig(a.(*core.KubeSchedulerConfig), b.(*KubeSchedulerConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeletConfig)(nil), (*core.KubeletConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubeletConfig_To_core_KubeletConfig(a.(*KubeletConfig), b.(*core.KubeletConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeletConfig)(nil), (*KubeletConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeletConfig_To_v1beta1_KubeletConfig(a.(*core.KubeletConfig), b.(*KubeletConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeletConfigEviction)(nil), (*core.KubeletConfigEviction)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubeletConfigEviction_To_core_KubeletConfigEviction(a.(*KubeletConfigEviction), b.(*core.KubeletConfigEviction), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeletConfigEviction)(nil), (*KubeletConfigEviction)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeletConfigEviction_To_v1beta1_KubeletConfigEviction(a.(*core.KubeletConfigEviction), b.(*KubeletConfigEviction), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeletConfigEvictionMinimumReclaim)(nil), (*core.KubeletConfigEvictionMinimumReclaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(a.(*KubeletConfigEvictionMinimumReclaim), b.(*core.KubeletConfigEvictionMinimumReclaim), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeletConfigEvictionMinimumReclaim)(nil), (*KubeletConfigEvictionMinimumReclaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeletConfigEvictionMinimumReclaim_To_v1beta1_KubeletConfigEvictionMinimumReclaim(a.(*core.KubeletConfigEvictionMinimumReclaim), b.(*KubeletConfigEvictionMinimumReclaim), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeletConfigEvictionSoftGracePeriod)(nil), (*core.KubeletConfigEvictionSoftGracePeriod)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(a.(*KubeletConfigEvictionSoftGracePeriod), b.(*core.KubeletConfigEvictionSoftGracePeriod), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeletConfigEvictionSoftGracePeriod)(nil), (*KubeletConfigEvictionSoftGracePeriod)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1beta1_KubeletConfigEvictionSoftGracePeriod(a.(*core.KubeletConfigEvictionSoftGracePeriod), b.(*KubeletConfigEvictionSoftGracePeriod), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeletConfigReserved)(nil), (*core.KubeletConfigReserved)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubeletConfigReserved_To_core_KubeletConfigReserved(a.(*KubeletConfigReserved), b.(*core.KubeletConfigReserved), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubeletConfigReserved)(nil), (*KubeletConfigReserved)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubeletConfigReserved_To_v1beta1_KubeletConfigReserved(a.(*core.KubeletConfigReserved), b.(*KubeletConfigReserved), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Kubernetes)(nil), (*core.Kubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Kubernetes_To_core_Kubernetes(a.(*Kubernetes), b.(*core.Kubernetes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Kubernetes)(nil), (*Kubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Kubernetes_To_v1beta1_Kubernetes(a.(*core.Kubernetes), b.(*Kubernetes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubernetesConfig)(nil), (*core.KubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(a.(*KubernetesConfig), b.(*core.KubernetesConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubernetesConfig)(nil), (*KubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(a.(*core.KubernetesConfig), b.(*KubernetesConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubernetesDashboard)(nil), (*core.KubernetesDashboard)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubernetesDashboard_To_core_KubernetesDashboard(a.(*KubernetesDashboard), b.(*core.KubernetesDashboard), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubernetesDashboard)(nil), (*KubernetesDashboard)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubernetesDashboard_To_v1beta1_KubernetesDashboard(a.(*core.KubernetesDashboard), b.(*KubernetesDashboard), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubernetesInfo)(nil), (*core.KubernetesInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubernetesInfo_To_core_KubernetesInfo(a.(*KubernetesInfo), b.(*core.KubernetesInfo), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubernetesInfo)(nil), (*KubernetesInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubernetesInfo_To_v1beta1_KubernetesInfo(a.(*core.KubernetesInfo), b.(*KubernetesInfo), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubernetesSettings)(nil), (*core.KubernetesSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubernetesSettings_To_core_KubernetesSettings(a.(*KubernetesSettings), b.(*core.KubernetesSettings), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.KubernetesSettings)(nil), (*KubernetesSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_KubernetesSettings_To_v1beta1_KubernetesSettings(a.(*core.KubernetesSettings), b.(*KubernetesSettings), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*LastError)(nil), (*core.LastError)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_LastError_To_core_LastError(a.(*LastError), b.(*core.LastError), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.LastError)(nil), (*LastError)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_LastError_To_v1beta1_LastError(a.(*core.LastError), b.(*LastError), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*LastOperation)(nil), (*core.LastOperation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_LastOperation_To_core_LastOperation(a.(*LastOperation), b.(*core.LastOperation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.LastOperation)(nil), (*LastOperation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_LastOperation_To_v1beta1_LastOperation(a.(*core.LastOperation), b.(*LastOperation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Machine)(nil), (*core.Machine)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Machine_To_core_Machine(a.(*Machine), b.(*core.Machine), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Machine)(nil), (*Machine)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Machine_To_v1beta1_Machine(a.(*core.Machine), b.(*Machine), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MachineControllerManagerSettings)(nil), (*core.MachineControllerManagerSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(a.(*MachineControllerManagerSettings), b.(*core.MachineControllerManagerSettings), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MachineControllerManagerSettings)(nil), (*MachineControllerManagerSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MachineControllerManagerSettings_To_v1beta1_MachineControllerManagerSettings(a.(*core.MachineControllerManagerSettings), b.(*MachineControllerManagerSettings), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MachineImage)(nil), (*core.MachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_MachineImage_To_core_MachineImage(a.(*MachineImage), b.(*core.MachineImage), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MachineImage)(nil), (*MachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MachineImage_To_v1beta1_MachineImage(a.(*core.MachineImage), b.(*MachineImage), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MachineImageVersion)(nil), (*core.MachineImageVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_MachineImageVersion_To_core_MachineImageVersion(a.(*MachineImageVersion), b.(*core.MachineImageVersion), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MachineImageVersion)(nil), (*MachineImageVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MachineImageVersion_To_v1beta1_MachineImageVersion(a.(*core.MachineImageVersion), b.(*MachineImageVersion), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MachineType)(nil), (*core.MachineType)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_MachineType_To_core_MachineType(a.(*MachineType), b.(*core.MachineType), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MachineType)(nil), (*MachineType)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MachineType_To_v1beta1_MachineType(a.(*core.MachineType), b.(*MachineType), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MachineTypeStorage)(nil), (*core.MachineTypeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_MachineTypeStorage_To_core_MachineTypeStorage(a.(*MachineTypeStorage), b.(*core.MachineTypeStorage), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MachineTypeStorage)(nil), (*MachineTypeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MachineTypeStorage_To_v1beta1_MachineTypeStorage(a.(*core.MachineTypeStorage), b.(*MachineTypeStorage), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Maintenance)(nil), (*core.Maintenance)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Maintenance_To_core_Maintenance(a.(*Maintenance), b.(*core.Maintenance), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Maintenance)(nil), (*Maintenance)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Maintenance_To_v1beta1_Maintenance(a.(*core.Maintenance), b.(*Maintenance), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MaintenanceAutoUpdate)(nil), (*core.MaintenanceAutoUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(a.(*MaintenanceAutoUpdate), b.(*core.MaintenanceAutoUpdate), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MaintenanceAutoUpdate)(nil), (*MaintenanceAutoUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MaintenanceAutoUpdate_To_v1beta1_MaintenanceAutoUpdate(a.(*core.MaintenanceAutoUpdate), b.(*MaintenanceAutoUpdate), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MaintenanceTimeWindow)(nil), (*core.MaintenanceTimeWindow)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(a.(*MaintenanceTimeWindow), b.(*core.MaintenanceTimeWindow), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.MaintenanceTimeWindow)(nil), (*MaintenanceTimeWindow)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_MaintenanceTimeWindow_To_v1beta1_MaintenanceTimeWindow(a.(*core.MaintenanceTimeWindow), b.(*MaintenanceTimeWindow), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Monitoring)(nil), (*core.Monitoring)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Monitoring_To_core_Monitoring(a.(*Monitoring), b.(*core.Monitoring), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Monitoring)(nil), (*Monitoring)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Monitoring_To_v1beta1_Monitoring(a.(*core.Monitoring), b.(*Monitoring), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*NamedResourceReference)(nil), (*core.NamedResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_NamedResourceReference_To_core_NamedResourceReference(a.(*NamedResourceReference), b.(*core.NamedResourceReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.NamedResourceReference)(nil), (*NamedResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_NamedResourceReference_To_v1beta1_NamedResourceReference(a.(*core.NamedResourceReference), b.(*NamedResourceReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Networking)(nil), (*core.Networking)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Networking_To_core_Networking(a.(*Networking), b.(*core.Networking), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Networking)(nil), (*Networking)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Networking_To_v1beta1_Networking(a.(*core.Networking), b.(*Networking), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*NginxIngress)(nil), (*core.NginxIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_NginxIngress_To_core_NginxIngress(a.(*NginxIngress), b.(*core.NginxIngress), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.NginxIngress)(nil), (*NginxIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_NginxIngress_To_v1beta1_NginxIngress(a.(*core.NginxIngress), b.(*NginxIngress), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*OIDCConfig)(nil), (*core.OIDCConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_OIDCConfig_To_core_OIDCConfig(a.(*OIDCConfig), b.(*core.OIDCConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.OIDCConfig)(nil), (*OIDCConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_OIDCConfig_To_v1beta1_OIDCConfig(a.(*core.OIDCConfig), b.(*OIDCConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*OpenIDConnectClientAuthentication)(nil), (*core.OpenIDConnectClientAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(a.(*OpenIDConnectClientAuthentication), b.(*core.OpenIDConnectClientAuthentication), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.OpenIDConnectClientAuthentication)(nil), (*OpenIDConnectClientAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_OpenIDConnectClientAuthentication_To_v1beta1_OpenIDConnectClientAuthentication(a.(*core.OpenIDConnectClientAuthentication), b.(*OpenIDConnectClientAuthentication), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Plant)(nil), (*core.Plant)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Plant_To_core_Plant(a.(*Plant), b.(*core.Plant), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Plant)(nil), (*Plant)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Plant_To_v1beta1_Plant(a.(*core.Plant), b.(*Plant), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*PlantList)(nil), (*core.PlantList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_PlantList_To_core_PlantList(a.(*PlantList), b.(*core.PlantList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.PlantList)(nil), (*PlantList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_PlantList_To_v1beta1_PlantList(a.(*core.PlantList), b.(*PlantList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*PlantSpec)(nil), (*core.PlantSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_PlantSpec_To_core_PlantSpec(a.(*PlantSpec), b.(*core.PlantSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.PlantSpec)(nil), (*PlantSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_PlantSpec_To_v1beta1_PlantSpec(a.(*core.PlantSpec), b.(*PlantSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*PlantStatus)(nil), (*core.PlantStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_PlantStatus_To_core_PlantStatus(a.(*PlantStatus), b.(*core.PlantStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.PlantStatus)(nil), (*PlantStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_PlantStatus_To_v1beta1_PlantStatus(a.(*core.PlantStatus), b.(*PlantStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Project)(nil), (*core.Project)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Project_To_core_Project(a.(*Project), b.(*core.Project), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Project)(nil), (*Project)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Project_To_v1beta1_Project(a.(*core.Project), b.(*Project), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ProjectList)(nil), (*core.ProjectList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ProjectList_To_core_ProjectList(a.(*ProjectList), b.(*core.ProjectList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ProjectList)(nil), (*ProjectList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ProjectList_To_v1beta1_ProjectList(a.(*core.ProjectList), b.(*ProjectList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ProjectStatus)(nil), (*core.ProjectStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ProjectStatus_To_core_ProjectStatus(a.(*ProjectStatus), b.(*core.ProjectStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ProjectStatus)(nil), (*ProjectStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ProjectStatus_To_v1beta1_ProjectStatus(a.(*core.ProjectStatus), b.(*ProjectStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ProjectTolerations)(nil), (*core.ProjectTolerations)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ProjectTolerations_To_core_ProjectTolerations(a.(*ProjectTolerations), b.(*core.ProjectTolerations), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ProjectTolerations)(nil), (*ProjectTolerations)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ProjectTolerations_To_v1beta1_ProjectTolerations(a.(*core.ProjectTolerations), b.(*ProjectTolerations), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Provider)(nil), (*core.Provider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Provider_To_core_Provider(a.(*Provider), b.(*core.Provider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Provider)(nil), (*Provider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Provider_To_v1beta1_Provider(a.(*core.Provider), b.(*Provider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Quota)(nil), (*core.Quota)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Quota_To_core_Quota(a.(*Quota), b.(*core.Quota), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Quota)(nil), (*Quota)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Quota_To_v1beta1_Quota(a.(*core.Quota), b.(*Quota), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*QuotaList)(nil), (*core.QuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_QuotaList_To_core_QuotaList(a.(*QuotaList), b.(*core.QuotaList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.QuotaList)(nil), (*QuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_QuotaList_To_v1beta1_QuotaList(a.(*core.QuotaList), b.(*QuotaList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*QuotaSpec)(nil), (*core.QuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_QuotaSpec_To_core_QuotaSpec(a.(*QuotaSpec), b.(*core.QuotaSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.QuotaSpec)(nil), (*QuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_QuotaSpec_To_v1beta1_QuotaSpec(a.(*core.QuotaSpec), b.(*QuotaSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Region)(nil), (*core.Region)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Region_To_core_Region(a.(*Region), b.(*core.Region), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Region)(nil), (*Region)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Region_To_v1beta1_Region(a.(*core.Region), b.(*Region), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ResourceWatchCacheSize)(nil), (*core.ResourceWatchCacheSize)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(a.(*ResourceWatchCacheSize), b.(*core.ResourceWatchCacheSize), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ResourceWatchCacheSize)(nil), (*ResourceWatchCacheSize)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ResourceWatchCacheSize_To_v1beta1_ResourceWatchCacheSize(a.(*core.ResourceWatchCacheSize), b.(*ResourceWatchCacheSize), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SecretBinding)(nil), (*core.SecretBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SecretBinding_To_core_SecretBinding(a.(*SecretBinding), b.(*core.SecretBinding), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SecretBinding)(nil), (*SecretBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SecretBinding_To_v1beta1_SecretBinding(a.(*core.SecretBinding), b.(*SecretBinding), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SecretBindingList)(nil), (*core.SecretBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SecretBindingList_To_core_SecretBindingList(a.(*SecretBindingList), b.(*core.SecretBindingList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SecretBindingList)(nil), (*SecretBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SecretBindingList_To_v1beta1_SecretBindingList(a.(*core.SecretBindingList), b.(*SecretBindingList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Seed)(nil), (*core.Seed)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Seed_To_core_Seed(a.(*Seed), b.(*core.Seed), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Seed)(nil), (*Seed)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Seed_To_v1beta1_Seed(a.(*core.Seed), b.(*Seed), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedBackup)(nil), (*core.SeedBackup)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedBackup_To_core_SeedBackup(a.(*SeedBackup), b.(*core.SeedBackup), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedBackup)(nil), (*SeedBackup)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedBackup_To_v1beta1_SeedBackup(a.(*core.SeedBackup), b.(*SeedBackup), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedDNS)(nil), (*core.SeedDNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedDNS_To_core_SeedDNS(a.(*SeedDNS), b.(*core.SeedDNS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedDNS)(nil), (*SeedDNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedDNS_To_v1beta1_SeedDNS(a.(*core.SeedDNS), b.(*SeedDNS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedDNSProvider)(nil), (*core.SeedDNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedDNSProvider_To_core_SeedDNSProvider(a.(*SeedDNSProvider), b.(*core.SeedDNSProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedDNSProvider)(nil), (*SeedDNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedDNSProvider_To_v1beta1_SeedDNSProvider(a.(*core.SeedDNSProvider), b.(*SeedDNSProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedList)(nil), (*core.SeedList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedList_To_core_SeedList(a.(*SeedList), b.(*core.SeedList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedList)(nil), (*SeedList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedList_To_v1beta1_SeedList(a.(*core.SeedList), b.(*SeedList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedNetworks)(nil), (*core.SeedNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedNetworks_To_core_SeedNetworks(a.(*SeedNetworks), b.(*core.SeedNetworks), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedNetworks)(nil), (*SeedNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedNetworks_To_v1beta1_SeedNetworks(a.(*core.SeedNetworks), b.(*SeedNetworks), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedProvider)(nil), (*core.SeedProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedProvider_To_core_SeedProvider(a.(*SeedProvider), b.(*core.SeedProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedProvider)(nil), (*SeedProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedProvider_To_v1beta1_SeedProvider(a.(*core.SeedProvider), b.(*SeedProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSelector)(nil), (*core.SeedSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedSelector_To_core_SeedSelector(a.(*SeedSelector), b.(*core.SeedSelector), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSelector)(nil), (*SeedSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSelector_To_v1beta1_SeedSelector(a.(*core.SeedSelector), b.(*SeedSelector), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSettingExcessCapacityReservation)(nil), (*core.SeedSettingExcessCapacityReservation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(a.(*SeedSettingExcessCapacityReservation), b.(*core.SeedSettingExcessCapacityReservation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSettingExcessCapacityReservation)(nil), (*SeedSettingExcessCapacityReservation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSettingExcessCapacityReservation_To_v1beta1_SeedSettingExcessCapacityReservation(a.(*core.SeedSettingExcessCapacityReservation), b.(*SeedSettingExcessCapacityReservation), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSettingLoadBalancerServices)(nil), (*core.SeedSettingLoadBalancerServices)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(a.(*SeedSettingLoadBalancerServices), b.(*core.SeedSettingLoadBalancerServices), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSettingLoadBalancerServices)(nil), (*SeedSettingLoadBalancerServices)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSettingLoadBalancerServices_To_v1beta1_SeedSettingLoadBalancerServices(a.(*core.SeedSettingLoadBalancerServices), b.(*SeedSettingLoadBalancerServices), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSettingScheduling)(nil), (*core.SeedSettingScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedSettingScheduling_To_core_SeedSettingScheduling(a.(*SeedSettingScheduling), b.(*core.SeedSettingScheduling), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSettingScheduling)(nil), (*SeedSettingScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSettingScheduling_To_v1beta1_SeedSettingScheduling(a.(*core.SeedSettingScheduling), b.(*SeedSettingScheduling), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSettingShootDNS)(nil), (*core.SeedSettingShootDNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(a.(*SeedSettingShootDNS), b.(*core.SeedSettingShootDNS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSettingShootDNS)(nil), (*SeedSettingShootDNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSettingShootDNS_To_v1beta1_SeedSettingShootDNS(a.(*core.SeedSettingShootDNS), b.(*SeedSettingShootDNS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSettingVerticalPodAutoscaler)(nil), (*core.SeedSettingVerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(a.(*SeedSettingVerticalPodAutoscaler), b.(*core.SeedSettingVerticalPodAutoscaler), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSettingVerticalPodAutoscaler)(nil), (*SeedSettingVerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSettingVerticalPodAutoscaler_To_v1beta1_SeedSettingVerticalPodAutoscaler(a.(*core.SeedSettingVerticalPodAutoscaler), b.(*SeedSettingVerticalPodAutoscaler), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSettings)(nil), (*core.SeedSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedSettings_To_core_SeedSettings(a.(*SeedSettings), b.(*core.SeedSettings), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSettings)(nil), (*SeedSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSettings_To_v1beta1_SeedSettings(a.(*core.SeedSettings), b.(*SeedSettings), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedSpec)(nil), (*core.SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedSpec_To_core_SeedSpec(a.(*SeedSpec), b.(*core.SeedSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedSpec)(nil), (*SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedSpec_To_v1beta1_SeedSpec(a.(*core.SeedSpec), b.(*SeedSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedStatus)(nil), (*core.SeedStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedStatus_To_core_SeedStatus(a.(*SeedStatus), b.(*core.SeedStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedStatus)(nil), (*SeedStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedStatus_To_v1beta1_SeedStatus(a.(*core.SeedStatus), b.(*SeedStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedTaint)(nil), (*core.SeedTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedTaint_To_core_SeedTaint(a.(*SeedTaint), b.(*core.SeedTaint), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedTaint)(nil), (*SeedTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedTaint_To_v1beta1_SeedTaint(a.(*core.SeedTaint), b.(*SeedTaint), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedVolume)(nil), (*core.SeedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedVolume_To_core_SeedVolume(a.(*SeedVolume), b.(*core.SeedVolume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedVolume)(nil), (*SeedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedVolume_To_v1beta1_SeedVolume(a.(*core.SeedVolume), b.(*SeedVolume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SeedVolumeProvider)(nil), (*core.SeedVolumeProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SeedVolumeProvider_To_core_SeedVolumeProvider(a.(*SeedVolumeProvider), b.(*core.SeedVolumeProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.SeedVolumeProvider)(nil), (*SeedVolumeProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_SeedVolumeProvider_To_v1beta1_SeedVolumeProvider(a.(*core.SeedVolumeProvider), b.(*SeedVolumeProvider), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ServiceAccountConfig)(nil), (*core.ServiceAccountConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ServiceAccountConfig_To_core_ServiceAccountConfig(a.(*ServiceAccountConfig), b.(*core.ServiceAccountConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ServiceAccountConfig)(nil), (*ServiceAccountConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ServiceAccountConfig_To_v1beta1_ServiceAccountConfig(a.(*core.ServiceAccountConfig), b.(*ServiceAccountConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Shoot)(nil), (*core.Shoot)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Shoot_To_core_Shoot(a.(*Shoot), b.(*core.Shoot), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Shoot)(nil), (*Shoot)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Shoot_To_v1beta1_Shoot(a.(*core.Shoot), b.(*Shoot), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ShootList)(nil), (*core.ShootList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ShootList_To_core_ShootList(a.(*ShootList), b.(*core.ShootList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ShootList)(nil), (*ShootList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootList_To_v1beta1_ShootList(a.(*core.ShootList), b.(*ShootList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ShootMachineImage)(nil), (*core.ShootMachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ShootMachineImage_To_core_ShootMachineImage(a.(*ShootMachineImage), b.(*core.ShootMachineImage), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ShootMachineImage)(nil), (*ShootMachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootMachineImage_To_v1beta1_ShootMachineImage(a.(*core.ShootMachineImage), b.(*ShootMachineImage), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ShootNetworks)(nil), (*core.ShootNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ShootNetworks_To_core_ShootNetworks(a.(*ShootNetworks), b.(*core.ShootNetworks), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ShootNetworks)(nil), (*ShootNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootNetworks_To_v1beta1_ShootNetworks(a.(*core.ShootNetworks), b.(*ShootNetworks), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ShootSpec)(nil), (*core.ShootSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ShootSpec_To_core_ShootSpec(a.(*ShootSpec), b.(*core.ShootSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ShootSpec)(nil), (*ShootSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootSpec_To_v1beta1_ShootSpec(a.(*core.ShootSpec), b.(*ShootSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ShootStatus)(nil), (*core.ShootStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ShootStatus_To_core_ShootStatus(a.(*ShootStatus), b.(*core.ShootStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.ShootStatus)(nil), (*ShootStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ShootStatus_To_v1beta1_ShootStatus(a.(*core.ShootStatus), b.(*ShootStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Toleration)(nil), (*core.Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Toleration_To_core_Toleration(a.(*Toleration), b.(*core.Toleration), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Toleration)(nil), (*Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Toleration_To_v1beta1_Toleration(a.(*core.Toleration), b.(*Toleration), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*VerticalPodAutoscaler)(nil), (*core.VerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(a.(*VerticalPodAutoscaler), b.(*core.VerticalPodAutoscaler), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.VerticalPodAutoscaler)(nil), (*VerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_VerticalPodAutoscaler_To_v1beta1_VerticalPodAutoscaler(a.(*core.VerticalPodAutoscaler), b.(*VerticalPodAutoscaler), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Volume_To_core_Volume(a.(*Volume), b.(*core.Volume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Volume)(nil), (*Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Volume_To_v1beta1_Volume(a.(*core.Volume), b.(*Volume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*VolumeType)(nil), (*core.VolumeType)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_VolumeType_To_core_VolumeType(a.(*VolumeType), b.(*core.VolumeType), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.VolumeType)(nil), (*VolumeType)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_VolumeType_To_v1beta1_VolumeType(a.(*core.VolumeType), b.(*VolumeType), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*WatchCacheSizes)(nil), (*core.WatchCacheSizes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_WatchCacheSizes_To_core_WatchCacheSizes(a.(*WatchCacheSizes), b.(*core.WatchCacheSizes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.WatchCacheSizes)(nil), (*WatchCacheSizes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_WatchCacheSizes_To_v1beta1_WatchCacheSizes(a.(*core.WatchCacheSizes), b.(*WatchCacheSizes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Worker)(nil), (*core.Worker)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Worker_To_core_Worker(a.(*Worker), b.(*core.Worker), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.Worker)(nil), (*Worker)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_Worker_To_v1beta1_Worker(a.(*core.Worker), b.(*Worker), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*WorkerKubernetes)(nil), (*core.WorkerKubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_WorkerKubernetes_To_core_WorkerKubernetes(a.(*WorkerKubernetes), b.(*core.WorkerKubernetes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.WorkerKubernetes)(nil), (*WorkerKubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes(a.(*core.WorkerKubernetes), b.(*WorkerKubernetes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*WorkerSystemComponents)(nil), (*core.WorkerSystemComponents)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_WorkerSystemComponents_To_core_WorkerSystemComponents(a.(*WorkerSystemComponents), b.(*core.WorkerSystemComponents), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*core.WorkerSystemComponents)(nil), (*WorkerSystemComponents)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_WorkerSystemComponents_To_v1beta1_WorkerSystemComponents(a.(*core.WorkerSystemComponents), b.(*WorkerSystemComponents), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.ProjectMember)(nil), (*ProjectMember)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ProjectMember_To_v1beta1_ProjectMember(a.(*core.ProjectMember), b.(*ProjectMember), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*core.ProjectSpec)(nil), (*ProjectSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_core_ProjectSpec_To_v1beta1_ProjectSpec(a.(*core.ProjectSpec), b.(*ProjectSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*ProjectMember)(nil), (*core.ProjectMember)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ProjectMember_To_core_ProjectMember(a.(*ProjectMember), b.(*core.ProjectMember), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*ProjectSpec)(nil), (*core.ProjectSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ProjectSpec_To_core_ProjectSpec(a.(*ProjectSpec), b.(*core.ProjectSpec), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1beta1_Addon_To_core_Addon(in *Addon, out *core.Addon, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_v1beta1_Addon_To_core_Addon is an autogenerated conversion function.
+func Convert_v1beta1_Addon_To_core_Addon(in *Addon, out *core.Addon, s conversion.Scope) error {
+ return autoConvert_v1beta1_Addon_To_core_Addon(in, out, s)
+}
+
+func autoConvert_core_Addon_To_v1beta1_Addon(in *core.Addon, out *Addon, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_core_Addon_To_v1beta1_Addon is an autogenerated conversion function.
+func Convert_core_Addon_To_v1beta1_Addon(in *core.Addon, out *Addon, s conversion.Scope) error {
+ return autoConvert_core_Addon_To_v1beta1_Addon(in, out, s)
+}
+
+func autoConvert_v1beta1_Addons_To_core_Addons(in *Addons, out *core.Addons, s conversion.Scope) error {
+ out.KubernetesDashboard = (*core.KubernetesDashboard)(unsafe.Pointer(in.KubernetesDashboard))
+ out.NginxIngress = (*core.NginxIngress)(unsafe.Pointer(in.NginxIngress))
+ return nil
+}
+
+// Convert_v1beta1_Addons_To_core_Addons is an autogenerated conversion function.
+func Convert_v1beta1_Addons_To_core_Addons(in *Addons, out *core.Addons, s conversion.Scope) error {
+ return autoConvert_v1beta1_Addons_To_core_Addons(in, out, s)
+}
+
+func autoConvert_core_Addons_To_v1beta1_Addons(in *core.Addons, out *Addons, s conversion.Scope) error {
+ out.KubernetesDashboard = (*KubernetesDashboard)(unsafe.Pointer(in.KubernetesDashboard))
+ out.NginxIngress = (*NginxIngress)(unsafe.Pointer(in.NginxIngress))
+ return nil
+}
+
+// Convert_core_Addons_To_v1beta1_Addons is an autogenerated conversion function.
+func Convert_core_Addons_To_v1beta1_Addons(in *core.Addons, out *Addons, s conversion.Scope) error {
+ return autoConvert_core_Addons_To_v1beta1_Addons(in, out, s)
+}
+
+func autoConvert_v1beta1_AdmissionPlugin_To_core_AdmissionPlugin(in *AdmissionPlugin, out *core.AdmissionPlugin, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Config = (*runtime.RawExtension)(unsafe.Pointer(in.Config))
+ return nil
+}
+
+// Convert_v1beta1_AdmissionPlugin_To_core_AdmissionPlugin is an autogenerated conversion function.
+func Convert_v1beta1_AdmissionPlugin_To_core_AdmissionPlugin(in *AdmissionPlugin, out *core.AdmissionPlugin, s conversion.Scope) error {
+ return autoConvert_v1beta1_AdmissionPlugin_To_core_AdmissionPlugin(in, out, s)
+}
+
+func autoConvert_core_AdmissionPlugin_To_v1beta1_AdmissionPlugin(in *core.AdmissionPlugin, out *AdmissionPlugin, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Config = (*runtime.RawExtension)(unsafe.Pointer(in.Config))
+ return nil
+}
+
+// Convert_core_AdmissionPlugin_To_v1beta1_AdmissionPlugin is an autogenerated conversion function.
+func Convert_core_AdmissionPlugin_To_v1beta1_AdmissionPlugin(in *core.AdmissionPlugin, out *AdmissionPlugin, s conversion.Scope) error {
+ return autoConvert_core_AdmissionPlugin_To_v1beta1_AdmissionPlugin(in, out, s)
+}
+
+func autoConvert_v1beta1_Alerting_To_core_Alerting(in *Alerting, out *core.Alerting, s conversion.Scope) error {
+ out.EmailReceivers = *(*[]string)(unsafe.Pointer(&in.EmailReceivers))
+ return nil
+}
+
+// Convert_v1beta1_Alerting_To_core_Alerting is an autogenerated conversion function.
+func Convert_v1beta1_Alerting_To_core_Alerting(in *Alerting, out *core.Alerting, s conversion.Scope) error {
+ return autoConvert_v1beta1_Alerting_To_core_Alerting(in, out, s)
+}
+
+func autoConvert_core_Alerting_To_v1beta1_Alerting(in *core.Alerting, out *Alerting, s conversion.Scope) error {
+ out.EmailReceivers = *(*[]string)(unsafe.Pointer(&in.EmailReceivers))
+ return nil
+}
+
+// Convert_core_Alerting_To_v1beta1_Alerting is an autogenerated conversion function.
+func Convert_core_Alerting_To_v1beta1_Alerting(in *core.Alerting, out *Alerting, s conversion.Scope) error {
+ return autoConvert_core_Alerting_To_v1beta1_Alerting(in, out, s)
+}
+
+func autoConvert_v1beta1_AuditConfig_To_core_AuditConfig(in *AuditConfig, out *core.AuditConfig, s conversion.Scope) error {
+ out.AuditPolicy = (*core.AuditPolicy)(unsafe.Pointer(in.AuditPolicy))
+ return nil
+}
+
+// Convert_v1beta1_AuditConfig_To_core_AuditConfig is an autogenerated conversion function.
+func Convert_v1beta1_AuditConfig_To_core_AuditConfig(in *AuditConfig, out *core.AuditConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_AuditConfig_To_core_AuditConfig(in, out, s)
+}
+
+func autoConvert_core_AuditConfig_To_v1beta1_AuditConfig(in *core.AuditConfig, out *AuditConfig, s conversion.Scope) error {
+ out.AuditPolicy = (*AuditPolicy)(unsafe.Pointer(in.AuditPolicy))
+ return nil
+}
+
+// Convert_core_AuditConfig_To_v1beta1_AuditConfig is an autogenerated conversion function.
+func Convert_core_AuditConfig_To_v1beta1_AuditConfig(in *core.AuditConfig, out *AuditConfig, s conversion.Scope) error {
+ return autoConvert_core_AuditConfig_To_v1beta1_AuditConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_AuditPolicy_To_core_AuditPolicy(in *AuditPolicy, out *core.AuditPolicy, s conversion.Scope) error {
+ out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef))
+ return nil
+}
+
+// Convert_v1beta1_AuditPolicy_To_core_AuditPolicy is an autogenerated conversion function.
+func Convert_v1beta1_AuditPolicy_To_core_AuditPolicy(in *AuditPolicy, out *core.AuditPolicy, s conversion.Scope) error {
+ return autoConvert_v1beta1_AuditPolicy_To_core_AuditPolicy(in, out, s)
+}
+
+func autoConvert_core_AuditPolicy_To_v1beta1_AuditPolicy(in *core.AuditPolicy, out *AuditPolicy, s conversion.Scope) error {
+ out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef))
+ return nil
+}
+
+// Convert_core_AuditPolicy_To_v1beta1_AuditPolicy is an autogenerated conversion function.
+func Convert_core_AuditPolicy_To_v1beta1_AuditPolicy(in *core.AuditPolicy, out *AuditPolicy, s conversion.Scope) error {
+ return autoConvert_core_AuditPolicy_To_v1beta1_AuditPolicy(in, out, s)
+}
+
+func autoConvert_v1beta1_AvailabilityZone_To_core_AvailabilityZone(in *AvailabilityZone, out *core.AvailabilityZone, s conversion.Scope) error {
+ out.Name = in.Name
+ out.UnavailableMachineTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableMachineTypes))
+ out.UnavailableVolumeTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableVolumeTypes))
+ return nil
+}
+
+// Convert_v1beta1_AvailabilityZone_To_core_AvailabilityZone is an autogenerated conversion function.
+func Convert_v1beta1_AvailabilityZone_To_core_AvailabilityZone(in *AvailabilityZone, out *core.AvailabilityZone, s conversion.Scope) error {
+ return autoConvert_v1beta1_AvailabilityZone_To_core_AvailabilityZone(in, out, s)
+}
+
+func autoConvert_core_AvailabilityZone_To_v1beta1_AvailabilityZone(in *core.AvailabilityZone, out *AvailabilityZone, s conversion.Scope) error {
+ out.Name = in.Name
+ out.UnavailableMachineTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableMachineTypes))
+ out.UnavailableVolumeTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableVolumeTypes))
+ return nil
+}
+
+// Convert_core_AvailabilityZone_To_v1beta1_AvailabilityZone is an autogenerated conversion function.
+func Convert_core_AvailabilityZone_To_v1beta1_AvailabilityZone(in *core.AvailabilityZone, out *AvailabilityZone, s conversion.Scope) error {
+ return autoConvert_core_AvailabilityZone_To_v1beta1_AvailabilityZone(in, out, s)
+}
+
+func autoConvert_v1beta1_BackupBucket_To_core_BackupBucket(in *BackupBucket, out *core.BackupBucket, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_BackupBucket_To_core_BackupBucket is an autogenerated conversion function.
+func Convert_v1beta1_BackupBucket_To_core_BackupBucket(in *BackupBucket, out *core.BackupBucket, s conversion.Scope) error {
+ return autoConvert_v1beta1_BackupBucket_To_core_BackupBucket(in, out, s)
+}
+
+func autoConvert_core_BackupBucket_To_v1beta1_BackupBucket(in *core.BackupBucket, out *BackupBucket, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_BackupBucket_To_v1beta1_BackupBucket is an autogenerated conversion function.
+func Convert_core_BackupBucket_To_v1beta1_BackupBucket(in *core.BackupBucket, out *BackupBucket, s conversion.Scope) error {
+ return autoConvert_core_BackupBucket_To_v1beta1_BackupBucket(in, out, s)
+}
+
+func autoConvert_v1beta1_BackupBucketList_To_core_BackupBucketList(in *BackupBucketList, out *core.BackupBucketList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.BackupBucket)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta1_BackupBucketList_To_core_BackupBucketList is an autogenerated conversion function.
+func Convert_v1beta1_BackupBucketList_To_core_BackupBucketList(in *BackupBucketList, out *core.BackupBucketList, s conversion.Scope) error {
+ return autoConvert_v1beta1_BackupBucketList_To_core_BackupBucketList(in, out, s)
+}
+
+func autoConvert_core_BackupBucketList_To_v1beta1_BackupBucketList(in *core.BackupBucketList, out *BackupBucketList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]BackupBucket)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_BackupBucketList_To_v1beta1_BackupBucketList is an autogenerated conversion function.
+func Convert_core_BackupBucketList_To_v1beta1_BackupBucketList(in *core.BackupBucketList, out *BackupBucketList, s conversion.Scope) error {
+ return autoConvert_core_BackupBucketList_To_v1beta1_BackupBucketList(in, out, s)
+}
+
+func autoConvert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider(in *BackupBucketProvider, out *core.BackupBucketProvider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.Region = in.Region
+ return nil
+}
+
+// Convert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider is an autogenerated conversion function.
+func Convert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider(in *BackupBucketProvider, out *core.BackupBucketProvider, s conversion.Scope) error {
+ return autoConvert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider(in, out, s)
+}
+
+func autoConvert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider(in *core.BackupBucketProvider, out *BackupBucketProvider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.Region = in.Region
+ return nil
+}
+
+// Convert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider is an autogenerated conversion function.
+func Convert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider(in *core.BackupBucketProvider, out *BackupBucketProvider, s conversion.Scope) error {
+ return autoConvert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider(in, out, s)
+}
+
+func autoConvert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec(in *BackupBucketSpec, out *core.BackupBucketSpec, s conversion.Scope) error {
+ if err := Convert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider(&in.Provider, &out.Provider, s); err != nil {
+ return err
+ }
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.SecretRef = in.SecretRef
+ out.SeedName = (*string)(unsafe.Pointer(in.SeedName))
+ return nil
+}
+
+// Convert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec is an autogenerated conversion function.
+func Convert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec(in *BackupBucketSpec, out *core.BackupBucketSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec(in, out, s)
+}
+
+func autoConvert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec(in *core.BackupBucketSpec, out *BackupBucketSpec, s conversion.Scope) error {
+ if err := Convert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider(&in.Provider, &out.Provider, s); err != nil {
+ return err
+ }
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.SecretRef = in.SecretRef
+ out.SeedName = (*string)(unsafe.Pointer(in.SeedName))
+ return nil
+}
+
+// Convert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec is an autogenerated conversion function.
+func Convert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec(in *core.BackupBucketSpec, out *BackupBucketSpec, s conversion.Scope) error {
+ return autoConvert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus(in *BackupBucketStatus, out *core.BackupBucketStatus, s conversion.Scope) error {
+ out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus))
+ out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation))
+ out.LastError = (*core.LastError)(unsafe.Pointer(in.LastError))
+ out.ObservedGeneration = in.ObservedGeneration
+ out.GeneratedSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.GeneratedSecretRef))
+ return nil
+}
+
+// Convert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus is an autogenerated conversion function.
+func Convert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus(in *BackupBucketStatus, out *core.BackupBucketStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus(in, out, s)
+}
+
+func autoConvert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus(in *core.BackupBucketStatus, out *BackupBucketStatus, s conversion.Scope) error {
+ out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus))
+ out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation))
+ out.LastError = (*LastError)(unsafe.Pointer(in.LastError))
+ out.ObservedGeneration = in.ObservedGeneration
+ out.GeneratedSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.GeneratedSecretRef))
+ return nil
+}
+
+// Convert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus is an autogenerated conversion function.
+func Convert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus(in *core.BackupBucketStatus, out *BackupBucketStatus, s conversion.Scope) error {
+ return autoConvert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_BackupEntry_To_core_BackupEntry(in *BackupEntry, out *core.BackupEntry, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_BackupEntrySpec_To_core_BackupEntrySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_BackupEntryStatus_To_core_BackupEntryStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_BackupEntry_To_core_BackupEntry is an autogenerated conversion function.
+func Convert_v1beta1_BackupEntry_To_core_BackupEntry(in *BackupEntry, out *core.BackupEntry, s conversion.Scope) error {
+ return autoConvert_v1beta1_BackupEntry_To_core_BackupEntry(in, out, s)
+}
+
+func autoConvert_core_BackupEntry_To_v1beta1_BackupEntry(in *core.BackupEntry, out *BackupEntry, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_BackupEntrySpec_To_v1beta1_BackupEntrySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_BackupEntryStatus_To_v1beta1_BackupEntryStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_BackupEntry_To_v1beta1_BackupEntry is an autogenerated conversion function.
+func Convert_core_BackupEntry_To_v1beta1_BackupEntry(in *core.BackupEntry, out *BackupEntry, s conversion.Scope) error {
+ return autoConvert_core_BackupEntry_To_v1beta1_BackupEntry(in, out, s)
+}
+
+func autoConvert_v1beta1_BackupEntryList_To_core_BackupEntryList(in *BackupEntryList, out *core.BackupEntryList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.BackupEntry)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta1_BackupEntryList_To_core_BackupEntryList is an autogenerated conversion function.
+func Convert_v1beta1_BackupEntryList_To_core_BackupEntryList(in *BackupEntryList, out *core.BackupEntryList, s conversion.Scope) error {
+ return autoConvert_v1beta1_BackupEntryList_To_core_BackupEntryList(in, out, s)
+}
+
+func autoConvert_core_BackupEntryList_To_v1beta1_BackupEntryList(in *core.BackupEntryList, out *BackupEntryList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]BackupEntry)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_BackupEntryList_To_v1beta1_BackupEntryList is an autogenerated conversion function.
+func Convert_core_BackupEntryList_To_v1beta1_BackupEntryList(in *core.BackupEntryList, out *BackupEntryList, s conversion.Scope) error {
+ return autoConvert_core_BackupEntryList_To_v1beta1_BackupEntryList(in, out, s)
+}
+
+func autoConvert_v1beta1_BackupEntrySpec_To_core_BackupEntrySpec(in *BackupEntrySpec, out *core.BackupEntrySpec, s conversion.Scope) error {
+ out.BucketName = in.BucketName
+ out.SeedName = (*string)(unsafe.Pointer(in.SeedName))
+ return nil
+}
+
+// Convert_v1beta1_BackupEntrySpec_To_core_BackupEntrySpec is an autogenerated conversion function.
+func Convert_v1beta1_BackupEntrySpec_To_core_BackupEntrySpec(in *BackupEntrySpec, out *core.BackupEntrySpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_BackupEntrySpec_To_core_BackupEntrySpec(in, out, s)
+}
+
+func autoConvert_core_BackupEntrySpec_To_v1beta1_BackupEntrySpec(in *core.BackupEntrySpec, out *BackupEntrySpec, s conversion.Scope) error {
+ out.BucketName = in.BucketName
+ out.SeedName = (*string)(unsafe.Pointer(in.SeedName))
+ return nil
+}
+
+// Convert_core_BackupEntrySpec_To_v1beta1_BackupEntrySpec is an autogenerated conversion function.
+func Convert_core_BackupEntrySpec_To_v1beta1_BackupEntrySpec(in *core.BackupEntrySpec, out *BackupEntrySpec, s conversion.Scope) error {
+ return autoConvert_core_BackupEntrySpec_To_v1beta1_BackupEntrySpec(in, out, s)
+}
+
+func autoConvert_v1beta1_BackupEntryStatus_To_core_BackupEntryStatus(in *BackupEntryStatus, out *core.BackupEntryStatus, s conversion.Scope) error {
+ out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation))
+ out.LastError = (*core.LastError)(unsafe.Pointer(in.LastError))
+ out.ObservedGeneration = in.ObservedGeneration
+ return nil
+}
+
+// Convert_v1beta1_BackupEntryStatus_To_core_BackupEntryStatus is an autogenerated conversion function.
+func Convert_v1beta1_BackupEntryStatus_To_core_BackupEntryStatus(in *BackupEntryStatus, out *core.BackupEntryStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_BackupEntryStatus_To_core_BackupEntryStatus(in, out, s)
+}
+
+func autoConvert_core_BackupEntryStatus_To_v1beta1_BackupEntryStatus(in *core.BackupEntryStatus, out *BackupEntryStatus, s conversion.Scope) error {
+ out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation))
+ out.LastError = (*LastError)(unsafe.Pointer(in.LastError))
+ out.ObservedGeneration = in.ObservedGeneration
+ return nil
+}
+
+// Convert_core_BackupEntryStatus_To_v1beta1_BackupEntryStatus is an autogenerated conversion function.
+func Convert_core_BackupEntryStatus_To_v1beta1_BackupEntryStatus(in *core.BackupEntryStatus, out *BackupEntryStatus, s conversion.Scope) error {
+ return autoConvert_core_BackupEntryStatus_To_v1beta1_BackupEntryStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_CRI_To_core_CRI(in *CRI, out *core.CRI, s conversion.Scope) error {
+ out.Name = core.CRIName(in.Name)
+ out.ContainerRuntimes = *(*[]core.ContainerRuntime)(unsafe.Pointer(&in.ContainerRuntimes))
+ return nil
+}
+
+// Convert_v1beta1_CRI_To_core_CRI is an autogenerated conversion function.
+func Convert_v1beta1_CRI_To_core_CRI(in *CRI, out *core.CRI, s conversion.Scope) error {
+ return autoConvert_v1beta1_CRI_To_core_CRI(in, out, s)
+}
+
+func autoConvert_core_CRI_To_v1beta1_CRI(in *core.CRI, out *CRI, s conversion.Scope) error {
+ out.Name = CRIName(in.Name)
+ out.ContainerRuntimes = *(*[]ContainerRuntime)(unsafe.Pointer(&in.ContainerRuntimes))
+ return nil
+}
+
+// Convert_core_CRI_To_v1beta1_CRI is an autogenerated conversion function.
+func Convert_core_CRI_To_v1beta1_CRI(in *core.CRI, out *CRI, s conversion.Scope) error {
+ return autoConvert_core_CRI_To_v1beta1_CRI(in, out, s)
+}
+
+func autoConvert_v1beta1_CloudInfo_To_core_CloudInfo(in *CloudInfo, out *core.CloudInfo, s conversion.Scope) error {
+ out.Type = in.Type
+ out.Region = in.Region
+ return nil
+}
+
+// Convert_v1beta1_CloudInfo_To_core_CloudInfo is an autogenerated conversion function.
+func Convert_v1beta1_CloudInfo_To_core_CloudInfo(in *CloudInfo, out *core.CloudInfo, s conversion.Scope) error {
+ return autoConvert_v1beta1_CloudInfo_To_core_CloudInfo(in, out, s)
+}
+
+func autoConvert_core_CloudInfo_To_v1beta1_CloudInfo(in *core.CloudInfo, out *CloudInfo, s conversion.Scope) error {
+ out.Type = in.Type
+ out.Region = in.Region
+ return nil
+}
+
+// Convert_core_CloudInfo_To_v1beta1_CloudInfo is an autogenerated conversion function.
+func Convert_core_CloudInfo_To_v1beta1_CloudInfo(in *core.CloudInfo, out *CloudInfo, s conversion.Scope) error {
+ return autoConvert_core_CloudInfo_To_v1beta1_CloudInfo(in, out, s)
+}
+
+func autoConvert_v1beta1_CloudProfile_To_core_CloudProfile(in *CloudProfile, out *core.CloudProfile, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_CloudProfile_To_core_CloudProfile is an autogenerated conversion function.
+func Convert_v1beta1_CloudProfile_To_core_CloudProfile(in *CloudProfile, out *core.CloudProfile, s conversion.Scope) error {
+ return autoConvert_v1beta1_CloudProfile_To_core_CloudProfile(in, out, s)
+}
+
+func autoConvert_core_CloudProfile_To_v1beta1_CloudProfile(in *core.CloudProfile, out *CloudProfile, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_CloudProfile_To_v1beta1_CloudProfile is an autogenerated conversion function.
+func Convert_core_CloudProfile_To_v1beta1_CloudProfile(in *core.CloudProfile, out *CloudProfile, s conversion.Scope) error {
+ return autoConvert_core_CloudProfile_To_v1beta1_CloudProfile(in, out, s)
+}
+
+func autoConvert_v1beta1_CloudProfileList_To_core_CloudProfileList(in *CloudProfileList, out *core.CloudProfileList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.CloudProfile)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta1_CloudProfileList_To_core_CloudProfileList is an autogenerated conversion function.
+func Convert_v1beta1_CloudProfileList_To_core_CloudProfileList(in *CloudProfileList, out *core.CloudProfileList, s conversion.Scope) error {
+ return autoConvert_v1beta1_CloudProfileList_To_core_CloudProfileList(in, out, s)
+}
+
+func autoConvert_core_CloudProfileList_To_v1beta1_CloudProfileList(in *core.CloudProfileList, out *CloudProfileList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]CloudProfile)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_CloudProfileList_To_v1beta1_CloudProfileList is an autogenerated conversion function.
+func Convert_core_CloudProfileList_To_v1beta1_CloudProfileList(in *core.CloudProfileList, out *CloudProfileList, s conversion.Scope) error {
+ return autoConvert_core_CloudProfileList_To_v1beta1_CloudProfileList(in, out, s)
+}
+
+func autoConvert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec(in *CloudProfileSpec, out *core.CloudProfileSpec, s conversion.Scope) error {
+ out.CABundle = (*string)(unsafe.Pointer(in.CABundle))
+ if err := Convert_v1beta1_KubernetesSettings_To_core_KubernetesSettings(&in.Kubernetes, &out.Kubernetes, s); err != nil {
+ return err
+ }
+ out.MachineImages = *(*[]core.MachineImage)(unsafe.Pointer(&in.MachineImages))
+ out.MachineTypes = *(*[]core.MachineType)(unsafe.Pointer(&in.MachineTypes))
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Regions = *(*[]core.Region)(unsafe.Pointer(&in.Regions))
+ out.SeedSelector = (*core.SeedSelector)(unsafe.Pointer(in.SeedSelector))
+ out.Type = in.Type
+ out.VolumeTypes = *(*[]core.VolumeType)(unsafe.Pointer(&in.VolumeTypes))
+ return nil
+}
+
+// Convert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec is an autogenerated conversion function.
+func Convert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec(in *CloudProfileSpec, out *core.CloudProfileSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec(in, out, s)
+}
+
+func autoConvert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec(in *core.CloudProfileSpec, out *CloudProfileSpec, s conversion.Scope) error {
+ out.CABundle = (*string)(unsafe.Pointer(in.CABundle))
+ if err := Convert_core_KubernetesSettings_To_v1beta1_KubernetesSettings(&in.Kubernetes, &out.Kubernetes, s); err != nil {
+ return err
+ }
+ out.MachineImages = *(*[]MachineImage)(unsafe.Pointer(&in.MachineImages))
+ out.MachineTypes = *(*[]MachineType)(unsafe.Pointer(&in.MachineTypes))
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Regions = *(*[]Region)(unsafe.Pointer(&in.Regions))
+ out.SeedSelector = (*SeedSelector)(unsafe.Pointer(in.SeedSelector))
+ out.Type = in.Type
+ out.VolumeTypes = *(*[]VolumeType)(unsafe.Pointer(&in.VolumeTypes))
+ return nil
+}
+
+// Convert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec is an autogenerated conversion function.
+func Convert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec(in *core.CloudProfileSpec, out *CloudProfileSpec, s conversion.Scope) error {
+ return autoConvert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec(in, out, s)
+}
+
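+// Fields of CloudProfileSpec that are not layout-compatible (or that have
+// hand-written conversions registered), such as KubernetesSettings, are
+// routed through their generated Convert_* helpers above instead of being
+// cast directly.
+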
+func autoConvert_v1beta1_ClusterAutoscaler_To_core_ClusterAutoscaler(in *ClusterAutoscaler, out *core.ClusterAutoscaler, s conversion.Scope) error {
+ out.ScaleDownDelayAfterAdd = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterAdd))
+ out.ScaleDownDelayAfterDelete = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterDelete))
+ out.ScaleDownDelayAfterFailure = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterFailure))
+ out.ScaleDownUnneededTime = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownUnneededTime))
+ out.ScaleDownUtilizationThreshold = (*float64)(unsafe.Pointer(in.ScaleDownUtilizationThreshold))
+ out.ScanInterval = (*metav1.Duration)(unsafe.Pointer(in.ScanInterval))
+ return nil
+}
+
+// Convert_v1beta1_ClusterAutoscaler_To_core_ClusterAutoscaler is an autogenerated conversion function.
+func Convert_v1beta1_ClusterAutoscaler_To_core_ClusterAutoscaler(in *ClusterAutoscaler, out *core.ClusterAutoscaler, s conversion.Scope) error {
+ return autoConvert_v1beta1_ClusterAutoscaler_To_core_ClusterAutoscaler(in, out, s)
+}
+
+func autoConvert_core_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler(in *core.ClusterAutoscaler, out *ClusterAutoscaler, s conversion.Scope) error {
+ out.ScaleDownDelayAfterAdd = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterAdd))
+ out.ScaleDownDelayAfterDelete = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterDelete))
+ out.ScaleDownDelayAfterFailure = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterFailure))
+ out.ScaleDownUnneededTime = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownUnneededTime))
+ out.ScaleDownUtilizationThreshold = (*float64)(unsafe.Pointer(in.ScaleDownUtilizationThreshold))
+ out.ScanInterval = (*metav1.Duration)(unsafe.Pointer(in.ScanInterval))
+ return nil
+}
+
+// Convert_core_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler is an autogenerated conversion function.
+func Convert_core_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler(in *core.ClusterAutoscaler, out *ClusterAutoscaler, s conversion.Scope) error {
+ return autoConvert_core_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler(in, out, s)
+}
+
+func autoConvert_v1beta1_ClusterInfo_To_core_ClusterInfo(in *ClusterInfo, out *core.ClusterInfo, s conversion.Scope) error {
+ if err := Convert_v1beta1_CloudInfo_To_core_CloudInfo(&in.Cloud, &out.Cloud, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_KubernetesInfo_To_core_KubernetesInfo(&in.Kubernetes, &out.Kubernetes, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_ClusterInfo_To_core_ClusterInfo is an autogenerated conversion function.
+func Convert_v1beta1_ClusterInfo_To_core_ClusterInfo(in *ClusterInfo, out *core.ClusterInfo, s conversion.Scope) error {
+ return autoConvert_v1beta1_ClusterInfo_To_core_ClusterInfo(in, out, s)
+}
+
+func autoConvert_core_ClusterInfo_To_v1beta1_ClusterInfo(in *core.ClusterInfo, out *ClusterInfo, s conversion.Scope) error {
+ if err := Convert_core_CloudInfo_To_v1beta1_CloudInfo(&in.Cloud, &out.Cloud, s); err != nil {
+ return err
+ }
+ if err := Convert_core_KubernetesInfo_To_v1beta1_KubernetesInfo(&in.Kubernetes, &out.Kubernetes, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_ClusterInfo_To_v1beta1_ClusterInfo is an autogenerated conversion function.
+func Convert_core_ClusterInfo_To_v1beta1_ClusterInfo(in *core.ClusterInfo, out *ClusterInfo, s conversion.Scope) error {
+ return autoConvert_core_ClusterInfo_To_v1beta1_ClusterInfo(in, out, s)
+}
+
+func autoConvert_v1beta1_Condition_To_core_Condition(in *Condition, out *core.Condition, s conversion.Scope) error {
+ out.Type = core.ConditionType(in.Type)
+ out.Status = core.ConditionStatus(in.Status)
+ out.LastTransitionTime = in.LastTransitionTime
+ out.LastUpdateTime = in.LastUpdateTime
+ out.Reason = in.Reason
+ out.Message = in.Message
+ out.Codes = *(*[]core.ErrorCode)(unsafe.Pointer(&in.Codes))
+ return nil
+}
+
+// Convert_v1beta1_Condition_To_core_Condition is an autogenerated conversion function.
+func Convert_v1beta1_Condition_To_core_Condition(in *Condition, out *core.Condition, s conversion.Scope) error {
+ return autoConvert_v1beta1_Condition_To_core_Condition(in, out, s)
+}
+
+func autoConvert_core_Condition_To_v1beta1_Condition(in *core.Condition, out *Condition, s conversion.Scope) error {
+ out.Type = ConditionType(in.Type)
+ out.Status = ConditionStatus(in.Status)
+ out.LastTransitionTime = in.LastTransitionTime
+ out.LastUpdateTime = in.LastUpdateTime
+ out.Reason = in.Reason
+ out.Message = in.Message
+ out.Codes = *(*[]ErrorCode)(unsafe.Pointer(&in.Codes))
+ return nil
+}
+
+// Convert_core_Condition_To_v1beta1_Condition is an autogenerated conversion function.
+func Convert_core_Condition_To_v1beta1_Condition(in *core.Condition, out *Condition, s conversion.Scope) error {
+ return autoConvert_core_Condition_To_v1beta1_Condition(in, out, s)
+}
+
+func autoConvert_v1beta1_ContainerRuntime_To_core_ContainerRuntime(in *ContainerRuntime, out *core.ContainerRuntime, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ return nil
+}
+
+// Convert_v1beta1_ContainerRuntime_To_core_ContainerRuntime is an autogenerated conversion function.
+func Convert_v1beta1_ContainerRuntime_To_core_ContainerRuntime(in *ContainerRuntime, out *core.ContainerRuntime, s conversion.Scope) error {
+ return autoConvert_v1beta1_ContainerRuntime_To_core_ContainerRuntime(in, out, s)
+}
+
+func autoConvert_core_ContainerRuntime_To_v1beta1_ContainerRuntime(in *core.ContainerRuntime, out *ContainerRuntime, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ return nil
+}
+
+// Convert_core_ContainerRuntime_To_v1beta1_ContainerRuntime is an autogenerated conversion function.
+func Convert_core_ContainerRuntime_To_v1beta1_ContainerRuntime(in *core.ContainerRuntime, out *ContainerRuntime, s conversion.Scope) error {
+ return autoConvert_core_ContainerRuntime_To_v1beta1_ContainerRuntime(in, out, s)
+}
+
+func autoConvert_v1beta1_ControllerDeployment_To_core_ControllerDeployment(in *ControllerDeployment, out *core.ControllerDeployment, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Policy = (*core.ControllerDeploymentPolicy)(unsafe.Pointer(in.Policy))
+ out.SeedSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.SeedSelector))
+ return nil
+}
+
+// Convert_v1beta1_ControllerDeployment_To_core_ControllerDeployment is an autogenerated conversion function.
+func Convert_v1beta1_ControllerDeployment_To_core_ControllerDeployment(in *ControllerDeployment, out *core.ControllerDeployment, s conversion.Scope) error {
+ return autoConvert_v1beta1_ControllerDeployment_To_core_ControllerDeployment(in, out, s)
+}
+
+func autoConvert_core_ControllerDeployment_To_v1beta1_ControllerDeployment(in *core.ControllerDeployment, out *ControllerDeployment, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Policy = (*ControllerDeploymentPolicy)(unsafe.Pointer(in.Policy))
+ out.SeedSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.SeedSelector))
+ return nil
+}
+
+// Convert_core_ControllerDeployment_To_v1beta1_ControllerDeployment is an autogenerated conversion function.
+func Convert_core_ControllerDeployment_To_v1beta1_ControllerDeployment(in *core.ControllerDeployment, out *ControllerDeployment, s conversion.Scope) error {
+ return autoConvert_core_ControllerDeployment_To_v1beta1_ControllerDeployment(in, out, s)
+}
+
+func autoConvert_v1beta1_ControllerInstallation_To_core_ControllerInstallation(in *ControllerInstallation, out *core.ControllerInstallation, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_ControllerInstallation_To_core_ControllerInstallation is an autogenerated conversion function.
+func Convert_v1beta1_ControllerInstallation_To_core_ControllerInstallation(in *ControllerInstallation, out *core.ControllerInstallation, s conversion.Scope) error {
+ return autoConvert_v1beta1_ControllerInstallation_To_core_ControllerInstallation(in, out, s)
+}
+
+func autoConvert_core_ControllerInstallation_To_v1beta1_ControllerInstallation(in *core.ControllerInstallation, out *ControllerInstallation, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_ControllerInstallation_To_v1beta1_ControllerInstallation is an autogenerated conversion function.
+func Convert_core_ControllerInstallation_To_v1beta1_ControllerInstallation(in *core.ControllerInstallation, out *ControllerInstallation, s conversion.Scope) error {
+ return autoConvert_core_ControllerInstallation_To_v1beta1_ControllerInstallation(in, out, s)
+}
+
+func autoConvert_v1beta1_ControllerInstallationList_To_core_ControllerInstallationList(in *ControllerInstallationList, out *core.ControllerInstallationList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.ControllerInstallation)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta1_ControllerInstallationList_To_core_ControllerInstallationList is an autogenerated conversion function.
+func Convert_v1beta1_ControllerInstallationList_To_core_ControllerInstallationList(in *ControllerInstallationList, out *core.ControllerInstallationList, s conversion.Scope) error {
+ return autoConvert_v1beta1_ControllerInstallationList_To_core_ControllerInstallationList(in, out, s)
+}
+
+func autoConvert_core_ControllerInstallationList_To_v1beta1_ControllerInstallationList(in *core.ControllerInstallationList, out *ControllerInstallationList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]ControllerInstallation)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_ControllerInstallationList_To_v1beta1_ControllerInstallationList is an autogenerated conversion function.
+func Convert_core_ControllerInstallationList_To_v1beta1_ControllerInstallationList(in *core.ControllerInstallationList, out *ControllerInstallationList, s conversion.Scope) error {
+ return autoConvert_core_ControllerInstallationList_To_v1beta1_ControllerInstallationList(in, out, s)
+}
+
+func autoConvert_v1beta1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(in *ControllerInstallationSpec, out *core.ControllerInstallationSpec, s conversion.Scope) error {
+ out.RegistrationRef = in.RegistrationRef
+ out.SeedRef = in.SeedRef
+ return nil
+}
+
+// Convert_v1beta1_ControllerInstallationSpec_To_core_ControllerInstallationSpec is an autogenerated conversion function.
+func Convert_v1beta1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(in *ControllerInstallationSpec, out *core.ControllerInstallationSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(in, out, s)
+}
+
+func autoConvert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSpec(in *core.ControllerInstallationSpec, out *ControllerInstallationSpec, s conversion.Scope) error {
+ out.RegistrationRef = in.RegistrationRef
+ out.SeedRef = in.SeedRef
+ return nil
+}
+
+// Convert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSpec is an autogenerated conversion function.
+func Convert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSpec(in *core.ControllerInstallationSpec, out *ControllerInstallationSpec, s conversion.Scope) error {
+ return autoConvert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in *ControllerInstallationStatus, out *core.ControllerInstallationStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions))
+ out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus))
+ return nil
+}
+
+// Convert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus is an autogenerated conversion function.
+func Convert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in *ControllerInstallationStatus, out *core.ControllerInstallationStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in, out, s)
+}
+
+func autoConvert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus(in *core.ControllerInstallationStatus, out *ControllerInstallationStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions))
+ out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus))
+ return nil
+}
+
+// Convert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus is an autogenerated conversion function.
+func Convert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus(in *core.ControllerInstallationStatus, out *ControllerInstallationStatus, s conversion.Scope) error {
+ return autoConvert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_ControllerRegistration_To_core_ControllerRegistration(in *ControllerRegistration, out *core.ControllerRegistration, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_ControllerRegistration_To_core_ControllerRegistration is an autogenerated conversion function.
+func Convert_v1beta1_ControllerRegistration_To_core_ControllerRegistration(in *ControllerRegistration, out *core.ControllerRegistration, s conversion.Scope) error {
+ return autoConvert_v1beta1_ControllerRegistration_To_core_ControllerRegistration(in, out, s)
+}
+
+func autoConvert_core_ControllerRegistration_To_v1beta1_ControllerRegistration(in *core.ControllerRegistration, out *ControllerRegistration, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_ControllerRegistrationSpec_To_v1beta1_ControllerRegistrationSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_ControllerRegistration_To_v1beta1_ControllerRegistration is an autogenerated conversion function.
+func Convert_core_ControllerRegistration_To_v1beta1_ControllerRegistration(in *core.ControllerRegistration, out *ControllerRegistration, s conversion.Scope) error {
+ return autoConvert_core_ControllerRegistration_To_v1beta1_ControllerRegistration(in, out, s)
+}
+
+func autoConvert_v1beta1_ControllerRegistrationList_To_core_ControllerRegistrationList(in *ControllerRegistrationList, out *core.ControllerRegistrationList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.ControllerRegistration)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta1_ControllerRegistrationList_To_core_ControllerRegistrationList is an autogenerated conversion function.
+func Convert_v1beta1_ControllerRegistrationList_To_core_ControllerRegistrationList(in *ControllerRegistrationList, out *core.ControllerRegistrationList, s conversion.Scope) error {
+ return autoConvert_v1beta1_ControllerRegistrationList_To_core_ControllerRegistrationList(in, out, s)
+}
+
+func autoConvert_core_ControllerRegistrationList_To_v1beta1_ControllerRegistrationList(in *core.ControllerRegistrationList, out *ControllerRegistrationList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]ControllerRegistration)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_ControllerRegistrationList_To_v1beta1_ControllerRegistrationList is an autogenerated conversion function.
+func Convert_core_ControllerRegistrationList_To_v1beta1_ControllerRegistrationList(in *core.ControllerRegistrationList, out *ControllerRegistrationList, s conversion.Scope) error {
+ return autoConvert_core_ControllerRegistrationList_To_v1beta1_ControllerRegistrationList(in, out, s)
+}
+
+func autoConvert_v1beta1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(in *ControllerRegistrationSpec, out *core.ControllerRegistrationSpec, s conversion.Scope) error {
+ out.Resources = *(*[]core.ControllerResource)(unsafe.Pointer(&in.Resources))
+ out.Deployment = (*core.ControllerDeployment)(unsafe.Pointer(in.Deployment))
+ return nil
+}
+
+// Convert_v1beta1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec is an autogenerated conversion function.
+func Convert_v1beta1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(in *ControllerRegistrationSpec, out *core.ControllerRegistrationSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(in, out, s)
+}
+
+func autoConvert_core_ControllerRegistrationSpec_To_v1beta1_ControllerRegistrationSpec(in *core.ControllerRegistrationSpec, out *ControllerRegistrationSpec, s conversion.Scope) error {
+ out.Resources = *(*[]ControllerResource)(unsafe.Pointer(&in.Resources))
+ out.Deployment = (*ControllerDeployment)(unsafe.Pointer(in.Deployment))
+ return nil
+}
+
+// Convert_core_ControllerRegistrationSpec_To_v1beta1_ControllerRegistrationSpec is an autogenerated conversion function.
+func Convert_core_ControllerRegistrationSpec_To_v1beta1_ControllerRegistrationSpec(in *core.ControllerRegistrationSpec, out *ControllerRegistrationSpec, s conversion.Scope) error {
+ return autoConvert_core_ControllerRegistrationSpec_To_v1beta1_ControllerRegistrationSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_ControllerResource_To_core_ControllerResource(in *ControllerResource, out *core.ControllerResource, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.Type = in.Type
+ out.GloballyEnabled = (*bool)(unsafe.Pointer(in.GloballyEnabled))
+ out.ReconcileTimeout = (*metav1.Duration)(unsafe.Pointer(in.ReconcileTimeout))
+ out.Primary = (*bool)(unsafe.Pointer(in.Primary))
+ return nil
+}
+
+// Convert_v1beta1_ControllerResource_To_core_ControllerResource is an autogenerated conversion function.
+func Convert_v1beta1_ControllerResource_To_core_ControllerResource(in *ControllerResource, out *core.ControllerResource, s conversion.Scope) error {
+ return autoConvert_v1beta1_ControllerResource_To_core_ControllerResource(in, out, s)
+}
+
+func autoConvert_core_ControllerResource_To_v1beta1_ControllerResource(in *core.ControllerResource, out *ControllerResource, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.Type = in.Type
+ out.GloballyEnabled = (*bool)(unsafe.Pointer(in.GloballyEnabled))
+ out.ReconcileTimeout = (*metav1.Duration)(unsafe.Pointer(in.ReconcileTimeout))
+ out.Primary = (*bool)(unsafe.Pointer(in.Primary))
+ return nil
+}
+
+// Convert_core_ControllerResource_To_v1beta1_ControllerResource is an autogenerated conversion function.
+func Convert_core_ControllerResource_To_v1beta1_ControllerResource(in *core.ControllerResource, out *ControllerResource, s conversion.Scope) error {
+ return autoConvert_core_ControllerResource_To_v1beta1_ControllerResource(in, out, s)
+}
+
+func autoConvert_v1beta1_DNS_To_core_DNS(in *DNS, out *core.DNS, s conversion.Scope) error {
+ out.Domain = (*string)(unsafe.Pointer(in.Domain))
+ out.Providers = *(*[]core.DNSProvider)(unsafe.Pointer(&in.Providers))
+ return nil
+}
+
+// Convert_v1beta1_DNS_To_core_DNS is an autogenerated conversion function.
+func Convert_v1beta1_DNS_To_core_DNS(in *DNS, out *core.DNS, s conversion.Scope) error {
+ return autoConvert_v1beta1_DNS_To_core_DNS(in, out, s)
+}
+
+func autoConvert_core_DNS_To_v1beta1_DNS(in *core.DNS, out *DNS, s conversion.Scope) error {
+ out.Domain = (*string)(unsafe.Pointer(in.Domain))
+ out.Providers = *(*[]DNSProvider)(unsafe.Pointer(&in.Providers))
+ return nil
+}
+
+// Convert_core_DNS_To_v1beta1_DNS is an autogenerated conversion function.
+func Convert_core_DNS_To_v1beta1_DNS(in *core.DNS, out *DNS, s conversion.Scope) error {
+ return autoConvert_core_DNS_To_v1beta1_DNS(in, out, s)
+}
+
+func autoConvert_v1beta1_DNSIncludeExclude_To_core_DNSIncludeExclude(in *DNSIncludeExclude, out *core.DNSIncludeExclude, s conversion.Scope) error {
+ out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
+ out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
+ return nil
+}
+
+// Convert_v1beta1_DNSIncludeExclude_To_core_DNSIncludeExclude is an autogenerated conversion function.
+func Convert_v1beta1_DNSIncludeExclude_To_core_DNSIncludeExclude(in *DNSIncludeExclude, out *core.DNSIncludeExclude, s conversion.Scope) error {
+ return autoConvert_v1beta1_DNSIncludeExclude_To_core_DNSIncludeExclude(in, out, s)
+}
+
+func autoConvert_core_DNSIncludeExclude_To_v1beta1_DNSIncludeExclude(in *core.DNSIncludeExclude, out *DNSIncludeExclude, s conversion.Scope) error {
+ out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
+ out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
+ return nil
+}
+
+// Convert_core_DNSIncludeExclude_To_v1beta1_DNSIncludeExclude is an autogenerated conversion function.
+func Convert_core_DNSIncludeExclude_To_v1beta1_DNSIncludeExclude(in *core.DNSIncludeExclude, out *DNSIncludeExclude, s conversion.Scope) error {
+ return autoConvert_core_DNSIncludeExclude_To_v1beta1_DNSIncludeExclude(in, out, s)
+}
+
+func autoConvert_v1beta1_DNSProvider_To_core_DNSProvider(in *DNSProvider, out *core.DNSProvider, s conversion.Scope) error {
+ out.Domains = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Domains))
+ out.Primary = (*bool)(unsafe.Pointer(in.Primary))
+ out.SecretName = (*string)(unsafe.Pointer(in.SecretName))
+ out.Type = (*string)(unsafe.Pointer(in.Type))
+ out.Zones = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Zones))
+ return nil
+}
+
+// Convert_v1beta1_DNSProvider_To_core_DNSProvider is an autogenerated conversion function.
+func Convert_v1beta1_DNSProvider_To_core_DNSProvider(in *DNSProvider, out *core.DNSProvider, s conversion.Scope) error {
+ return autoConvert_v1beta1_DNSProvider_To_core_DNSProvider(in, out, s)
+}
+
+func autoConvert_core_DNSProvider_To_v1beta1_DNSProvider(in *core.DNSProvider, out *DNSProvider, s conversion.Scope) error {
+ out.Domains = (*DNSIncludeExclude)(unsafe.Pointer(in.Domains))
+ out.Primary = (*bool)(unsafe.Pointer(in.Primary))
+ out.SecretName = (*string)(unsafe.Pointer(in.SecretName))
+ out.Type = (*string)(unsafe.Pointer(in.Type))
+ out.Zones = (*DNSIncludeExclude)(unsafe.Pointer(in.Zones))
+ return nil
+}
+
+// Convert_core_DNSProvider_To_v1beta1_DNSProvider is an autogenerated conversion function.
+func Convert_core_DNSProvider_To_v1beta1_DNSProvider(in *core.DNSProvider, out *DNSProvider, s conversion.Scope) error {
+ return autoConvert_core_DNSProvider_To_v1beta1_DNSProvider(in, out, s)
+}
+
+func autoConvert_v1beta1_DataVolume_To_core_DataVolume(in *DataVolume, out *core.DataVolume, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Type = (*string)(unsafe.Pointer(in.Type))
+ out.VolumeSize = in.VolumeSize
+ out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted))
+ return nil
+}
+
+// Convert_v1beta1_DataVolume_To_core_DataVolume is an autogenerated conversion function.
+func Convert_v1beta1_DataVolume_To_core_DataVolume(in *DataVolume, out *core.DataVolume, s conversion.Scope) error {
+ return autoConvert_v1beta1_DataVolume_To_core_DataVolume(in, out, s)
+}
+
+func autoConvert_core_DataVolume_To_v1beta1_DataVolume(in *core.DataVolume, out *DataVolume, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Type = (*string)(unsafe.Pointer(in.Type))
+ out.VolumeSize = in.VolumeSize
+ out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted))
+ return nil
+}
+
+// Convert_core_DataVolume_To_v1beta1_DataVolume is an autogenerated conversion function.
+func Convert_core_DataVolume_To_v1beta1_DataVolume(in *core.DataVolume, out *DataVolume, s conversion.Scope) error {
+ return autoConvert_core_DataVolume_To_v1beta1_DataVolume(in, out, s)
+}
+
+func autoConvert_v1beta1_Endpoint_To_core_Endpoint(in *Endpoint, out *core.Endpoint, s conversion.Scope) error {
+ out.Name = in.Name
+ out.URL = in.URL
+ out.Purpose = in.Purpose
+ return nil
+}
+
+// Convert_v1beta1_Endpoint_To_core_Endpoint is an autogenerated conversion function.
+func Convert_v1beta1_Endpoint_To_core_Endpoint(in *Endpoint, out *core.Endpoint, s conversion.Scope) error {
+ return autoConvert_v1beta1_Endpoint_To_core_Endpoint(in, out, s)
+}
+
+func autoConvert_core_Endpoint_To_v1beta1_Endpoint(in *core.Endpoint, out *Endpoint, s conversion.Scope) error {
+ out.Name = in.Name
+ out.URL = in.URL
+ out.Purpose = in.Purpose
+ return nil
+}
+
+// Convert_core_Endpoint_To_v1beta1_Endpoint is an autogenerated conversion function.
+func Convert_core_Endpoint_To_v1beta1_Endpoint(in *core.Endpoint, out *Endpoint, s conversion.Scope) error {
+ return autoConvert_core_Endpoint_To_v1beta1_Endpoint(in, out, s)
+}
+
+func autoConvert_v1beta1_ExpirableVersion_To_core_ExpirableVersion(in *ExpirableVersion, out *core.ExpirableVersion, s conversion.Scope) error {
+ out.Version = in.Version
+ out.ExpirationDate = (*metav1.Time)(unsafe.Pointer(in.ExpirationDate))
+ out.Classification = (*core.VersionClassification)(unsafe.Pointer(in.Classification))
+ return nil
+}
+
+// Convert_v1beta1_ExpirableVersion_To_core_ExpirableVersion is an autogenerated conversion function.
+func Convert_v1beta1_ExpirableVersion_To_core_ExpirableVersion(in *ExpirableVersion, out *core.ExpirableVersion, s conversion.Scope) error {
+ return autoConvert_v1beta1_ExpirableVersion_To_core_ExpirableVersion(in, out, s)
+}
+
+func autoConvert_core_ExpirableVersion_To_v1beta1_ExpirableVersion(in *core.ExpirableVersion, out *ExpirableVersion, s conversion.Scope) error {
+ out.Version = in.Version
+ out.ExpirationDate = (*metav1.Time)(unsafe.Pointer(in.ExpirationDate))
+ out.Classification = (*VersionClassification)(unsafe.Pointer(in.Classification))
+ return nil
+}
+
+// Convert_core_ExpirableVersion_To_v1beta1_ExpirableVersion is an autogenerated conversion function.
+func Convert_core_ExpirableVersion_To_v1beta1_ExpirableVersion(in *core.ExpirableVersion, out *ExpirableVersion, s conversion.Scope) error {
+ return autoConvert_core_ExpirableVersion_To_v1beta1_ExpirableVersion(in, out, s)
+}
+
+func autoConvert_v1beta1_Extension_To_core_Extension(in *Extension, out *core.Extension, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Disabled = (*bool)(unsafe.Pointer(in.Disabled))
+ return nil
+}
+
+// Convert_v1beta1_Extension_To_core_Extension is an autogenerated conversion function.
+func Convert_v1beta1_Extension_To_core_Extension(in *Extension, out *core.Extension, s conversion.Scope) error {
+ return autoConvert_v1beta1_Extension_To_core_Extension(in, out, s)
+}
+
+func autoConvert_core_Extension_To_v1beta1_Extension(in *core.Extension, out *Extension, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Disabled = (*bool)(unsafe.Pointer(in.Disabled))
+ return nil
+}
+
+// Convert_core_Extension_To_v1beta1_Extension is an autogenerated conversion function.
+func Convert_core_Extension_To_v1beta1_Extension(in *core.Extension, out *Extension, s conversion.Scope) error {
+ return autoConvert_core_Extension_To_v1beta1_Extension(in, out, s)
+}
+
+func autoConvert_v1beta1_Gardener_To_core_Gardener(in *Gardener, out *core.Gardener, s conversion.Scope) error {
+ out.ID = in.ID
+ out.Name = in.Name
+ out.Version = in.Version
+ return nil
+}
+
+// Convert_v1beta1_Gardener_To_core_Gardener is an autogenerated conversion function.
+func Convert_v1beta1_Gardener_To_core_Gardener(in *Gardener, out *core.Gardener, s conversion.Scope) error {
+ return autoConvert_v1beta1_Gardener_To_core_Gardener(in, out, s)
+}
+
+func autoConvert_core_Gardener_To_v1beta1_Gardener(in *core.Gardener, out *Gardener, s conversion.Scope) error {
+ out.ID = in.ID
+ out.Name = in.Name
+ out.Version = in.Version
+ return nil
+}
+
+// Convert_core_Gardener_To_v1beta1_Gardener is an autogenerated conversion function.
+func Convert_core_Gardener_To_v1beta1_Gardener(in *core.Gardener, out *Gardener, s conversion.Scope) error {
+ return autoConvert_core_Gardener_To_v1beta1_Gardener(in, out, s)
+}
+
+func autoConvert_v1beta1_Hibernation_To_core_Hibernation(in *Hibernation, out *core.Hibernation, s conversion.Scope) error {
+ out.Enabled = (*bool)(unsafe.Pointer(in.Enabled))
+ out.Schedules = *(*[]core.HibernationSchedule)(unsafe.Pointer(&in.Schedules))
+ return nil
+}
+
+// Convert_v1beta1_Hibernation_To_core_Hibernation is an autogenerated conversion function.
+func Convert_v1beta1_Hibernation_To_core_Hibernation(in *Hibernation, out *core.Hibernation, s conversion.Scope) error {
+ return autoConvert_v1beta1_Hibernation_To_core_Hibernation(in, out, s)
+}
+
+func autoConvert_core_Hibernation_To_v1beta1_Hibernation(in *core.Hibernation, out *Hibernation, s conversion.Scope) error {
+ out.Enabled = (*bool)(unsafe.Pointer(in.Enabled))
+ out.Schedules = *(*[]HibernationSchedule)(unsafe.Pointer(&in.Schedules))
+ return nil
+}
+
+// Convert_core_Hibernation_To_v1beta1_Hibernation is an autogenerated conversion function.
+func Convert_core_Hibernation_To_v1beta1_Hibernation(in *core.Hibernation, out *Hibernation, s conversion.Scope) error {
+ return autoConvert_core_Hibernation_To_v1beta1_Hibernation(in, out, s)
+}
+
+func autoConvert_v1beta1_HibernationSchedule_To_core_HibernationSchedule(in *HibernationSchedule, out *core.HibernationSchedule, s conversion.Scope) error {
+ out.Start = (*string)(unsafe.Pointer(in.Start))
+ out.End = (*string)(unsafe.Pointer(in.End))
+ out.Location = (*string)(unsafe.Pointer(in.Location))
+ return nil
+}
+
+// Convert_v1beta1_HibernationSchedule_To_core_HibernationSchedule is an autogenerated conversion function.
+func Convert_v1beta1_HibernationSchedule_To_core_HibernationSchedule(in *HibernationSchedule, out *core.HibernationSchedule, s conversion.Scope) error {
+ return autoConvert_v1beta1_HibernationSchedule_To_core_HibernationSchedule(in, out, s)
+}
+
+func autoConvert_core_HibernationSchedule_To_v1beta1_HibernationSchedule(in *core.HibernationSchedule, out *HibernationSchedule, s conversion.Scope) error {
+ out.Start = (*string)(unsafe.Pointer(in.Start))
+ out.End = (*string)(unsafe.Pointer(in.End))
+ out.Location = (*string)(unsafe.Pointer(in.Location))
+ return nil
+}
+
+// Convert_core_HibernationSchedule_To_v1beta1_HibernationSchedule is an autogenerated conversion function.
+func Convert_core_HibernationSchedule_To_v1beta1_HibernationSchedule(in *core.HibernationSchedule, out *HibernationSchedule, s conversion.Scope) error {
+ return autoConvert_core_HibernationSchedule_To_v1beta1_HibernationSchedule(in, out, s)
+}
+
+func autoConvert_v1beta1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(in *HorizontalPodAutoscalerConfig, out *core.HorizontalPodAutoscalerConfig, s conversion.Scope) error {
+ out.CPUInitializationPeriod = (*metav1.Duration)(unsafe.Pointer(in.CPUInitializationPeriod))
+ out.DownscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.DownscaleDelay))
+ out.DownscaleStabilization = (*metav1.Duration)(unsafe.Pointer(in.DownscaleStabilization))
+ out.InitialReadinessDelay = (*metav1.Duration)(unsafe.Pointer(in.InitialReadinessDelay))
+ out.SyncPeriod = (*metav1.Duration)(unsafe.Pointer(in.SyncPeriod))
+ out.Tolerance = (*float64)(unsafe.Pointer(in.Tolerance))
+ out.UpscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.UpscaleDelay))
+ return nil
+}
+
+// Convert_v1beta1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig is an autogenerated conversion function.
+func Convert_v1beta1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(in *HorizontalPodAutoscalerConfig, out *core.HorizontalPodAutoscalerConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(in, out, s)
+}
+
+func autoConvert_core_HorizontalPodAutoscalerConfig_To_v1beta1_HorizontalPodAutoscalerConfig(in *core.HorizontalPodAutoscalerConfig, out *HorizontalPodAutoscalerConfig, s conversion.Scope) error {
+ out.CPUInitializationPeriod = (*metav1.Duration)(unsafe.Pointer(in.CPUInitializationPeriod))
+ out.DownscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.DownscaleDelay))
+ out.DownscaleStabilization = (*metav1.Duration)(unsafe.Pointer(in.DownscaleStabilization))
+ out.InitialReadinessDelay = (*metav1.Duration)(unsafe.Pointer(in.InitialReadinessDelay))
+ out.SyncPeriod = (*metav1.Duration)(unsafe.Pointer(in.SyncPeriod))
+ out.Tolerance = (*float64)(unsafe.Pointer(in.Tolerance))
+ out.UpscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.UpscaleDelay))
+ return nil
+}
+
+// Convert_core_HorizontalPodAutoscalerConfig_To_v1beta1_HorizontalPodAutoscalerConfig is an autogenerated conversion function.
+func Convert_core_HorizontalPodAutoscalerConfig_To_v1beta1_HorizontalPodAutoscalerConfig(in *core.HorizontalPodAutoscalerConfig, out *HorizontalPodAutoscalerConfig, s conversion.Scope) error {
+ return autoConvert_core_HorizontalPodAutoscalerConfig_To_v1beta1_HorizontalPodAutoscalerConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_Ingress_To_core_Ingress(in *Ingress, out *core.Ingress, s conversion.Scope) error {
+ out.Domain = in.Domain
+ if err := Convert_v1beta1_IngressController_To_core_IngressController(&in.Controller, &out.Controller, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_Ingress_To_core_Ingress is an autogenerated conversion function.
+func Convert_v1beta1_Ingress_To_core_Ingress(in *Ingress, out *core.Ingress, s conversion.Scope) error {
+ return autoConvert_v1beta1_Ingress_To_core_Ingress(in, out, s)
+}
+
+func autoConvert_core_Ingress_To_v1beta1_Ingress(in *core.Ingress, out *Ingress, s conversion.Scope) error {
+ out.Domain = in.Domain
+ if err := Convert_core_IngressController_To_v1beta1_IngressController(&in.Controller, &out.Controller, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_Ingress_To_v1beta1_Ingress is an autogenerated conversion function.
+func Convert_core_Ingress_To_v1beta1_Ingress(in *core.Ingress, out *Ingress, s conversion.Scope) error {
+ return autoConvert_core_Ingress_To_v1beta1_Ingress(in, out, s)
+}
+
+func autoConvert_v1beta1_IngressController_To_core_IngressController(in *IngressController, out *core.IngressController, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ return nil
+}
+
+// Convert_v1beta1_IngressController_To_core_IngressController is an autogenerated conversion function.
+func Convert_v1beta1_IngressController_To_core_IngressController(in *IngressController, out *core.IngressController, s conversion.Scope) error {
+ return autoConvert_v1beta1_IngressController_To_core_IngressController(in, out, s)
+}
+
+func autoConvert_core_IngressController_To_v1beta1_IngressController(in *core.IngressController, out *IngressController, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ return nil
+}
+
+// Convert_core_IngressController_To_v1beta1_IngressController is an autogenerated conversion function.
+func Convert_core_IngressController_To_v1beta1_IngressController(in *core.IngressController, out *IngressController, s conversion.Scope) error {
+ return autoConvert_core_IngressController_To_v1beta1_IngressController(in, out, s)
+}
+
+func autoConvert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in *KubeAPIServerConfig, out *core.KubeAPIServerConfig, s conversion.Scope) error {
+ if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.AdmissionPlugins = *(*[]core.AdmissionPlugin)(unsafe.Pointer(&in.AdmissionPlugins))
+ out.APIAudiences = *(*[]string)(unsafe.Pointer(&in.APIAudiences))
+ out.AuditConfig = (*core.AuditConfig)(unsafe.Pointer(in.AuditConfig))
+ out.EnableBasicAuthentication = (*bool)(unsafe.Pointer(in.EnableBasicAuthentication))
+ out.OIDCConfig = (*core.OIDCConfig)(unsafe.Pointer(in.OIDCConfig))
+ out.RuntimeConfig = *(*map[string]bool)(unsafe.Pointer(&in.RuntimeConfig))
+ out.ServiceAccountConfig = (*core.ServiceAccountConfig)(unsafe.Pointer(in.ServiceAccountConfig))
+ out.WatchCacheSizes = (*core.WatchCacheSizes)(unsafe.Pointer(in.WatchCacheSizes))
+ out.Requests = (*core.KubeAPIServerRequests)(unsafe.Pointer(in.Requests))
+ return nil
+}
+
+// Convert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig is an autogenerated conversion function.
+func Convert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in *KubeAPIServerConfig, out *core.KubeAPIServerConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in, out, s)
+}
+
+func autoConvert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig(in *core.KubeAPIServerConfig, out *KubeAPIServerConfig, s conversion.Scope) error {
+ if err := Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.AdmissionPlugins = *(*[]AdmissionPlugin)(unsafe.Pointer(&in.AdmissionPlugins))
+ out.APIAudiences = *(*[]string)(unsafe.Pointer(&in.APIAudiences))
+ out.AuditConfig = (*AuditConfig)(unsafe.Pointer(in.AuditConfig))
+ out.EnableBasicAuthentication = (*bool)(unsafe.Pointer(in.EnableBasicAuthentication))
+ out.OIDCConfig = (*OIDCConfig)(unsafe.Pointer(in.OIDCConfig))
+ out.RuntimeConfig = *(*map[string]bool)(unsafe.Pointer(&in.RuntimeConfig))
+ out.ServiceAccountConfig = (*ServiceAccountConfig)(unsafe.Pointer(in.ServiceAccountConfig))
+ out.WatchCacheSizes = (*WatchCacheSizes)(unsafe.Pointer(in.WatchCacheSizes))
+ out.Requests = (*KubeAPIServerRequests)(unsafe.Pointer(in.Requests))
+ return nil
+}
+
+// Convert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig is an autogenerated conversion function.
+func Convert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig(in *core.KubeAPIServerConfig, out *KubeAPIServerConfig, s conversion.Scope) error {
+ return autoConvert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(in *KubeAPIServerRequests, out *core.KubeAPIServerRequests, s conversion.Scope) error {
+ out.MaxNonMutatingInflight = (*int32)(unsafe.Pointer(in.MaxNonMutatingInflight))
+ out.MaxMutatingInflight = (*int32)(unsafe.Pointer(in.MaxMutatingInflight))
+ return nil
+}
+
+// Convert_v1beta1_KubeAPIServerRequests_To_core_KubeAPIServerRequests is an autogenerated conversion function.
+func Convert_v1beta1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(in *KubeAPIServerRequests, out *core.KubeAPIServerRequests, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(in, out, s)
+}
+
+func autoConvert_core_KubeAPIServerRequests_To_v1beta1_KubeAPIServerRequests(in *core.KubeAPIServerRequests, out *KubeAPIServerRequests, s conversion.Scope) error {
+ out.MaxNonMutatingInflight = (*int32)(unsafe.Pointer(in.MaxNonMutatingInflight))
+ out.MaxMutatingInflight = (*int32)(unsafe.Pointer(in.MaxMutatingInflight))
+ return nil
+}
+
+// Convert_core_KubeAPIServerRequests_To_v1beta1_KubeAPIServerRequests is an autogenerated conversion function.
+func Convert_core_KubeAPIServerRequests_To_v1beta1_KubeAPIServerRequests(in *core.KubeAPIServerRequests, out *KubeAPIServerRequests, s conversion.Scope) error {
+ return autoConvert_core_KubeAPIServerRequests_To_v1beta1_KubeAPIServerRequests(in, out, s)
+}
+
+func autoConvert_v1beta1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in *KubeControllerManagerConfig, out *core.KubeControllerManagerConfig, s conversion.Scope) error {
+ if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.HorizontalPodAutoscalerConfig = (*core.HorizontalPodAutoscalerConfig)(unsafe.Pointer(in.HorizontalPodAutoscalerConfig))
+ out.NodeCIDRMaskSize = (*int32)(unsafe.Pointer(in.NodeCIDRMaskSize))
+ out.PodEvictionTimeout = (*metav1.Duration)(unsafe.Pointer(in.PodEvictionTimeout))
+ return nil
+}
+
+// Convert_v1beta1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig is an autogenerated conversion function.
+func Convert_v1beta1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in *KubeControllerManagerConfig, out *core.KubeControllerManagerConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in, out, s)
+}
+
+func autoConvert_core_KubeControllerManagerConfig_To_v1beta1_KubeControllerManagerConfig(in *core.KubeControllerManagerConfig, out *KubeControllerManagerConfig, s conversion.Scope) error {
+ if err := Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.HorizontalPodAutoscalerConfig = (*HorizontalPodAutoscalerConfig)(unsafe.Pointer(in.HorizontalPodAutoscalerConfig))
+ out.NodeCIDRMaskSize = (*int32)(unsafe.Pointer(in.NodeCIDRMaskSize))
+ out.PodEvictionTimeout = (*metav1.Duration)(unsafe.Pointer(in.PodEvictionTimeout))
+ return nil
+}
+
+// Convert_core_KubeControllerManagerConfig_To_v1beta1_KubeControllerManagerConfig is an autogenerated conversion function.
+func Convert_core_KubeControllerManagerConfig_To_v1beta1_KubeControllerManagerConfig(in *core.KubeControllerManagerConfig, out *KubeControllerManagerConfig, s conversion.Scope) error {
+ return autoConvert_core_KubeControllerManagerConfig_To_v1beta1_KubeControllerManagerConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_KubeProxyConfig_To_core_KubeProxyConfig(in *KubeProxyConfig, out *core.KubeProxyConfig, s conversion.Scope) error {
+ if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.Mode = (*core.ProxyMode)(unsafe.Pointer(in.Mode))
+ return nil
+}
+
+// Convert_v1beta1_KubeProxyConfig_To_core_KubeProxyConfig is an autogenerated conversion function.
+func Convert_v1beta1_KubeProxyConfig_To_core_KubeProxyConfig(in *KubeProxyConfig, out *core.KubeProxyConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubeProxyConfig_To_core_KubeProxyConfig(in, out, s)
+}
+
+func autoConvert_core_KubeProxyConfig_To_v1beta1_KubeProxyConfig(in *core.KubeProxyConfig, out *KubeProxyConfig, s conversion.Scope) error {
+ if err := Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.Mode = (*ProxyMode)(unsafe.Pointer(in.Mode))
+ return nil
+}
+
+// Convert_core_KubeProxyConfig_To_v1beta1_KubeProxyConfig is an autogenerated conversion function.
+func Convert_core_KubeProxyConfig_To_v1beta1_KubeProxyConfig(in *core.KubeProxyConfig, out *KubeProxyConfig, s conversion.Scope) error {
+ return autoConvert_core_KubeProxyConfig_To_v1beta1_KubeProxyConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in *KubeSchedulerConfig, out *core.KubeSchedulerConfig, s conversion.Scope) error {
+ if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.KubeMaxPDVols = (*string)(unsafe.Pointer(in.KubeMaxPDVols))
+ return nil
+}
+
+// Convert_v1beta1_KubeSchedulerConfig_To_core_KubeSchedulerConfig is an autogenerated conversion function.
+func Convert_v1beta1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in *KubeSchedulerConfig, out *core.KubeSchedulerConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in, out, s)
+}
+
+func autoConvert_core_KubeSchedulerConfig_To_v1beta1_KubeSchedulerConfig(in *core.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error {
+ if err := Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.KubeMaxPDVols = (*string)(unsafe.Pointer(in.KubeMaxPDVols))
+ return nil
+}
+
+// Convert_core_KubeSchedulerConfig_To_v1beta1_KubeSchedulerConfig is an autogenerated conversion function.
+func Convert_core_KubeSchedulerConfig_To_v1beta1_KubeSchedulerConfig(in *core.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error {
+ return autoConvert_core_KubeSchedulerConfig_To_v1beta1_KubeSchedulerConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_KubeletConfig_To_core_KubeletConfig(in *KubeletConfig, out *core.KubeletConfig, s conversion.Scope) error {
+ if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.CPUCFSQuota = (*bool)(unsafe.Pointer(in.CPUCFSQuota))
+ out.CPUManagerPolicy = (*string)(unsafe.Pointer(in.CPUManagerPolicy))
+ out.EvictionHard = (*core.KubeletConfigEviction)(unsafe.Pointer(in.EvictionHard))
+ out.EvictionMaxPodGracePeriod = (*int32)(unsafe.Pointer(in.EvictionMaxPodGracePeriod))
+ out.EvictionMinimumReclaim = (*core.KubeletConfigEvictionMinimumReclaim)(unsafe.Pointer(in.EvictionMinimumReclaim))
+ out.EvictionPressureTransitionPeriod = (*metav1.Duration)(unsafe.Pointer(in.EvictionPressureTransitionPeriod))
+ out.EvictionSoft = (*core.KubeletConfigEviction)(unsafe.Pointer(in.EvictionSoft))
+ out.EvictionSoftGracePeriod = (*core.KubeletConfigEvictionSoftGracePeriod)(unsafe.Pointer(in.EvictionSoftGracePeriod))
+ out.MaxPods = (*int32)(unsafe.Pointer(in.MaxPods))
+ out.PodPIDsLimit = (*int64)(unsafe.Pointer(in.PodPIDsLimit))
+ out.ImagePullProgressDeadline = (*metav1.Duration)(unsafe.Pointer(in.ImagePullProgressDeadline))
+ out.FailSwapOn = (*bool)(unsafe.Pointer(in.FailSwapOn))
+ out.KubeReserved = (*core.KubeletConfigReserved)(unsafe.Pointer(in.KubeReserved))
+ out.SystemReserved = (*core.KubeletConfigReserved)(unsafe.Pointer(in.SystemReserved))
+ return nil
+}
+
+// Convert_v1beta1_KubeletConfig_To_core_KubeletConfig is an autogenerated conversion function.
+func Convert_v1beta1_KubeletConfig_To_core_KubeletConfig(in *KubeletConfig, out *core.KubeletConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubeletConfig_To_core_KubeletConfig(in, out, s)
+}
+
+func autoConvert_core_KubeletConfig_To_v1beta1_KubeletConfig(in *core.KubeletConfig, out *KubeletConfig, s conversion.Scope) error {
+ if err := Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil {
+ return err
+ }
+ out.CPUCFSQuota = (*bool)(unsafe.Pointer(in.CPUCFSQuota))
+ out.CPUManagerPolicy = (*string)(unsafe.Pointer(in.CPUManagerPolicy))
+ out.EvictionHard = (*KubeletConfigEviction)(unsafe.Pointer(in.EvictionHard))
+ out.EvictionMaxPodGracePeriod = (*int32)(unsafe.Pointer(in.EvictionMaxPodGracePeriod))
+ out.EvictionMinimumReclaim = (*KubeletConfigEvictionMinimumReclaim)(unsafe.Pointer(in.EvictionMinimumReclaim))
+ out.EvictionPressureTransitionPeriod = (*metav1.Duration)(unsafe.Pointer(in.EvictionPressureTransitionPeriod))
+ out.EvictionSoft = (*KubeletConfigEviction)(unsafe.Pointer(in.EvictionSoft))
+ out.EvictionSoftGracePeriod = (*KubeletConfigEvictionSoftGracePeriod)(unsafe.Pointer(in.EvictionSoftGracePeriod))
+ out.MaxPods = (*int32)(unsafe.Pointer(in.MaxPods))
+ out.PodPIDsLimit = (*int64)(unsafe.Pointer(in.PodPIDsLimit))
+ out.ImagePullProgressDeadline = (*metav1.Duration)(unsafe.Pointer(in.ImagePullProgressDeadline))
+ out.FailSwapOn = (*bool)(unsafe.Pointer(in.FailSwapOn))
+ out.KubeReserved = (*KubeletConfigReserved)(unsafe.Pointer(in.KubeReserved))
+ out.SystemReserved = (*KubeletConfigReserved)(unsafe.Pointer(in.SystemReserved))
+ return nil
+}
+
+// Convert_core_KubeletConfig_To_v1beta1_KubeletConfig is an autogenerated conversion function.
+func Convert_core_KubeletConfig_To_v1beta1_KubeletConfig(in *core.KubeletConfig, out *KubeletConfig, s conversion.Scope) error {
+ return autoConvert_core_KubeletConfig_To_v1beta1_KubeletConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_KubeletConfigEviction_To_core_KubeletConfigEviction(in *KubeletConfigEviction, out *core.KubeletConfigEviction, s conversion.Scope) error {
+ out.MemoryAvailable = (*string)(unsafe.Pointer(in.MemoryAvailable))
+ out.ImageFSAvailable = (*string)(unsafe.Pointer(in.ImageFSAvailable))
+ out.ImageFSInodesFree = (*string)(unsafe.Pointer(in.ImageFSInodesFree))
+ out.NodeFSAvailable = (*string)(unsafe.Pointer(in.NodeFSAvailable))
+ out.NodeFSInodesFree = (*string)(unsafe.Pointer(in.NodeFSInodesFree))
+ return nil
+}
+
+// Convert_v1beta1_KubeletConfigEviction_To_core_KubeletConfigEviction is an autogenerated conversion function.
+func Convert_v1beta1_KubeletConfigEviction_To_core_KubeletConfigEviction(in *KubeletConfigEviction, out *core.KubeletConfigEviction, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubeletConfigEviction_To_core_KubeletConfigEviction(in, out, s)
+}
+
+func autoConvert_core_KubeletConfigEviction_To_v1beta1_KubeletConfigEviction(in *core.KubeletConfigEviction, out *KubeletConfigEviction, s conversion.Scope) error {
+ out.MemoryAvailable = (*string)(unsafe.Pointer(in.MemoryAvailable))
+ out.ImageFSAvailable = (*string)(unsafe.Pointer(in.ImageFSAvailable))
+ out.ImageFSInodesFree = (*string)(unsafe.Pointer(in.ImageFSInodesFree))
+ out.NodeFSAvailable = (*string)(unsafe.Pointer(in.NodeFSAvailable))
+ out.NodeFSInodesFree = (*string)(unsafe.Pointer(in.NodeFSInodesFree))
+ return nil
+}
+
+// Convert_core_KubeletConfigEviction_To_v1beta1_KubeletConfigEviction is an autogenerated conversion function.
+func Convert_core_KubeletConfigEviction_To_v1beta1_KubeletConfigEviction(in *core.KubeletConfigEviction, out *KubeletConfigEviction, s conversion.Scope) error {
+ return autoConvert_core_KubeletConfigEviction_To_v1beta1_KubeletConfigEviction(in, out, s)
+}
+
+func autoConvert_v1beta1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(in *KubeletConfigEvictionMinimumReclaim, out *core.KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error {
+ out.MemoryAvailable = (*resource.Quantity)(unsafe.Pointer(in.MemoryAvailable))
+ out.ImageFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.ImageFSAvailable))
+ out.ImageFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.ImageFSInodesFree))
+ out.NodeFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.NodeFSAvailable))
+ out.NodeFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.NodeFSInodesFree))
+ return nil
+}
+
+// Convert_v1beta1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim is an autogenerated conversion function.
+func Convert_v1beta1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(in *KubeletConfigEvictionMinimumReclaim, out *core.KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(in, out, s)
+}
+
+func autoConvert_core_KubeletConfigEvictionMinimumReclaim_To_v1beta1_KubeletConfigEvictionMinimumReclaim(in *core.KubeletConfigEvictionMinimumReclaim, out *KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error {
+ out.MemoryAvailable = (*resource.Quantity)(unsafe.Pointer(in.MemoryAvailable))
+ out.ImageFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.ImageFSAvailable))
+ out.ImageFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.ImageFSInodesFree))
+ out.NodeFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.NodeFSAvailable))
+ out.NodeFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.NodeFSInodesFree))
+ return nil
+}
+
+// Convert_core_KubeletConfigEvictionMinimumReclaim_To_v1beta1_KubeletConfigEvictionMinimumReclaim is an autogenerated conversion function.
+func Convert_core_KubeletConfigEvictionMinimumReclaim_To_v1beta1_KubeletConfigEvictionMinimumReclaim(in *core.KubeletConfigEvictionMinimumReclaim, out *KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error {
+ return autoConvert_core_KubeletConfigEvictionMinimumReclaim_To_v1beta1_KubeletConfigEvictionMinimumReclaim(in, out, s)
+}
+
+func autoConvert_v1beta1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(in *KubeletConfigEvictionSoftGracePeriod, out *core.KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error {
+ out.MemoryAvailable = (*metav1.Duration)(unsafe.Pointer(in.MemoryAvailable))
+ out.ImageFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.ImageFSAvailable))
+ out.ImageFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.ImageFSInodesFree))
+ out.NodeFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.NodeFSAvailable))
+ out.NodeFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.NodeFSInodesFree))
+ return nil
+}
+
+// Convert_v1beta1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod is an autogenerated conversion function.
+func Convert_v1beta1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(in *KubeletConfigEvictionSoftGracePeriod, out *core.KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(in, out, s)
+}
+
+func autoConvert_core_KubeletConfigEvictionSoftGracePeriod_To_v1beta1_KubeletConfigEvictionSoftGracePeriod(in *core.KubeletConfigEvictionSoftGracePeriod, out *KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error {
+ out.MemoryAvailable = (*metav1.Duration)(unsafe.Pointer(in.MemoryAvailable))
+ out.ImageFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.ImageFSAvailable))
+ out.ImageFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.ImageFSInodesFree))
+ out.NodeFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.NodeFSAvailable))
+ out.NodeFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.NodeFSInodesFree))
+ return nil
+}
+
+// Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1beta1_KubeletConfigEvictionSoftGracePeriod is an autogenerated conversion function.
+func Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1beta1_KubeletConfigEvictionSoftGracePeriod(in *core.KubeletConfigEvictionSoftGracePeriod, out *KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error {
+ return autoConvert_core_KubeletConfigEvictionSoftGracePeriod_To_v1beta1_KubeletConfigEvictionSoftGracePeriod(in, out, s)
+}
+
+func autoConvert_v1beta1_KubeletConfigReserved_To_core_KubeletConfigReserved(in *KubeletConfigReserved, out *core.KubeletConfigReserved, s conversion.Scope) error {
+ out.CPU = (*resource.Quantity)(unsafe.Pointer(in.CPU))
+ out.Memory = (*resource.Quantity)(unsafe.Pointer(in.Memory))
+ out.EphemeralStorage = (*resource.Quantity)(unsafe.Pointer(in.EphemeralStorage))
+ out.PID = (*resource.Quantity)(unsafe.Pointer(in.PID))
+ return nil
+}
+
+// Convert_v1beta1_KubeletConfigReserved_To_core_KubeletConfigReserved is an autogenerated conversion function.
+func Convert_v1beta1_KubeletConfigReserved_To_core_KubeletConfigReserved(in *KubeletConfigReserved, out *core.KubeletConfigReserved, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubeletConfigReserved_To_core_KubeletConfigReserved(in, out, s)
+}
+
+func autoConvert_core_KubeletConfigReserved_To_v1beta1_KubeletConfigReserved(in *core.KubeletConfigReserved, out *KubeletConfigReserved, s conversion.Scope) error {
+ out.CPU = (*resource.Quantity)(unsafe.Pointer(in.CPU))
+ out.Memory = (*resource.Quantity)(unsafe.Pointer(in.Memory))
+ out.EphemeralStorage = (*resource.Quantity)(unsafe.Pointer(in.EphemeralStorage))
+ out.PID = (*resource.Quantity)(unsafe.Pointer(in.PID))
+ return nil
+}
+
+// Convert_core_KubeletConfigReserved_To_v1beta1_KubeletConfigReserved is an autogenerated conversion function.
+func Convert_core_KubeletConfigReserved_To_v1beta1_KubeletConfigReserved(in *core.KubeletConfigReserved, out *KubeletConfigReserved, s conversion.Scope) error {
+ return autoConvert_core_KubeletConfigReserved_To_v1beta1_KubeletConfigReserved(in, out, s)
+}
+
+func autoConvert_v1beta1_Kubernetes_To_core_Kubernetes(in *Kubernetes, out *core.Kubernetes, s conversion.Scope) error {
+ out.AllowPrivilegedContainers = (*bool)(unsafe.Pointer(in.AllowPrivilegedContainers))
+ out.ClusterAutoscaler = (*core.ClusterAutoscaler)(unsafe.Pointer(in.ClusterAutoscaler))
+ out.KubeAPIServer = (*core.KubeAPIServerConfig)(unsafe.Pointer(in.KubeAPIServer))
+ out.KubeControllerManager = (*core.KubeControllerManagerConfig)(unsafe.Pointer(in.KubeControllerManager))
+ out.KubeScheduler = (*core.KubeSchedulerConfig)(unsafe.Pointer(in.KubeScheduler))
+ out.KubeProxy = (*core.KubeProxyConfig)(unsafe.Pointer(in.KubeProxy))
+ out.Kubelet = (*core.KubeletConfig)(unsafe.Pointer(in.Kubelet))
+ out.Version = in.Version
+ out.VerticalPodAutoscaler = (*core.VerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler))
+ return nil
+}
+
+// Convert_v1beta1_Kubernetes_To_core_Kubernetes is an autogenerated conversion function.
+func Convert_v1beta1_Kubernetes_To_core_Kubernetes(in *Kubernetes, out *core.Kubernetes, s conversion.Scope) error {
+ return autoConvert_v1beta1_Kubernetes_To_core_Kubernetes(in, out, s)
+}
+
+func autoConvert_core_Kubernetes_To_v1beta1_Kubernetes(in *core.Kubernetes, out *Kubernetes, s conversion.Scope) error {
+ out.AllowPrivilegedContainers = (*bool)(unsafe.Pointer(in.AllowPrivilegedContainers))
+ out.ClusterAutoscaler = (*ClusterAutoscaler)(unsafe.Pointer(in.ClusterAutoscaler))
+ out.KubeAPIServer = (*KubeAPIServerConfig)(unsafe.Pointer(in.KubeAPIServer))
+ out.KubeControllerManager = (*KubeControllerManagerConfig)(unsafe.Pointer(in.KubeControllerManager))
+ out.KubeScheduler = (*KubeSchedulerConfig)(unsafe.Pointer(in.KubeScheduler))
+ out.KubeProxy = (*KubeProxyConfig)(unsafe.Pointer(in.KubeProxy))
+ out.Kubelet = (*KubeletConfig)(unsafe.Pointer(in.Kubelet))
+ out.Version = in.Version
+ out.VerticalPodAutoscaler = (*VerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler))
+ return nil
+}
+
+// Convert_core_Kubernetes_To_v1beta1_Kubernetes is an autogenerated conversion function.
+func Convert_core_Kubernetes_To_v1beta1_Kubernetes(in *core.Kubernetes, out *Kubernetes, s conversion.Scope) error {
+ return autoConvert_core_Kubernetes_To_v1beta1_Kubernetes(in, out, s)
+}
+
+func autoConvert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(in *KubernetesConfig, out *core.KubernetesConfig, s conversion.Scope) error {
+ out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
+ return nil
+}
+
+// Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig is an autogenerated conversion function.
+func Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(in *KubernetesConfig, out *core.KubernetesConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(in, out, s)
+}
+
+func autoConvert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(in *core.KubernetesConfig, out *KubernetesConfig, s conversion.Scope) error {
+ out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
+ return nil
+}
+
+// Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig is an autogenerated conversion function.
+func Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(in *core.KubernetesConfig, out *KubernetesConfig, s conversion.Scope) error {
+ return autoConvert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_KubernetesDashboard_To_core_KubernetesDashboard(in *KubernetesDashboard, out *core.KubernetesDashboard, s conversion.Scope) error {
+ if err := Convert_v1beta1_Addon_To_core_Addon(&in.Addon, &out.Addon, s); err != nil {
+ return err
+ }
+ out.AuthenticationMode = (*string)(unsafe.Pointer(in.AuthenticationMode))
+ return nil
+}
+
+// Convert_v1beta1_KubernetesDashboard_To_core_KubernetesDashboard is an autogenerated conversion function.
+func Convert_v1beta1_KubernetesDashboard_To_core_KubernetesDashboard(in *KubernetesDashboard, out *core.KubernetesDashboard, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubernetesDashboard_To_core_KubernetesDashboard(in, out, s)
+}
+
+func autoConvert_core_KubernetesDashboard_To_v1beta1_KubernetesDashboard(in *core.KubernetesDashboard, out *KubernetesDashboard, s conversion.Scope) error {
+ if err := Convert_core_Addon_To_v1beta1_Addon(&in.Addon, &out.Addon, s); err != nil {
+ return err
+ }
+ out.AuthenticationMode = (*string)(unsafe.Pointer(in.AuthenticationMode))
+ return nil
+}
+
+// Convert_core_KubernetesDashboard_To_v1beta1_KubernetesDashboard is an autogenerated conversion function.
+func Convert_core_KubernetesDashboard_To_v1beta1_KubernetesDashboard(in *core.KubernetesDashboard, out *KubernetesDashboard, s conversion.Scope) error {
+ return autoConvert_core_KubernetesDashboard_To_v1beta1_KubernetesDashboard(in, out, s)
+}
+
+func autoConvert_v1beta1_KubernetesInfo_To_core_KubernetesInfo(in *KubernetesInfo, out *core.KubernetesInfo, s conversion.Scope) error {
+ out.Version = in.Version
+ return nil
+}
+
+// Convert_v1beta1_KubernetesInfo_To_core_KubernetesInfo is an autogenerated conversion function.
+func Convert_v1beta1_KubernetesInfo_To_core_KubernetesInfo(in *KubernetesInfo, out *core.KubernetesInfo, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubernetesInfo_To_core_KubernetesInfo(in, out, s)
+}
+
+func autoConvert_core_KubernetesInfo_To_v1beta1_KubernetesInfo(in *core.KubernetesInfo, out *KubernetesInfo, s conversion.Scope) error {
+ out.Version = in.Version
+ return nil
+}
+
+// Convert_core_KubernetesInfo_To_v1beta1_KubernetesInfo is an autogenerated conversion function.
+func Convert_core_KubernetesInfo_To_v1beta1_KubernetesInfo(in *core.KubernetesInfo, out *KubernetesInfo, s conversion.Scope) error {
+ return autoConvert_core_KubernetesInfo_To_v1beta1_KubernetesInfo(in, out, s)
+}
+
+func autoConvert_v1beta1_KubernetesSettings_To_core_KubernetesSettings(in *KubernetesSettings, out *core.KubernetesSettings, s conversion.Scope) error {
+ out.Versions = *(*[]core.ExpirableVersion)(unsafe.Pointer(&in.Versions))
+ return nil
+}
+
+// Convert_v1beta1_KubernetesSettings_To_core_KubernetesSettings is an autogenerated conversion function.
+func Convert_v1beta1_KubernetesSettings_To_core_KubernetesSettings(in *KubernetesSettings, out *core.KubernetesSettings, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubernetesSettings_To_core_KubernetesSettings(in, out, s)
+}
+
+func autoConvert_core_KubernetesSettings_To_v1beta1_KubernetesSettings(in *core.KubernetesSettings, out *KubernetesSettings, s conversion.Scope) error {
+ out.Versions = *(*[]ExpirableVersion)(unsafe.Pointer(&in.Versions))
+ return nil
+}
+
+// Convert_core_KubernetesSettings_To_v1beta1_KubernetesSettings is an autogenerated conversion function.
+func Convert_core_KubernetesSettings_To_v1beta1_KubernetesSettings(in *core.KubernetesSettings, out *KubernetesSettings, s conversion.Scope) error {
+ return autoConvert_core_KubernetesSettings_To_v1beta1_KubernetesSettings(in, out, s)
+}
+
+func autoConvert_v1beta1_LastError_To_core_LastError(in *LastError, out *core.LastError, s conversion.Scope) error {
+ out.Description = in.Description
+ out.TaskID = (*string)(unsafe.Pointer(in.TaskID))
+ out.Codes = *(*[]core.ErrorCode)(unsafe.Pointer(&in.Codes))
+ out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime))
+ return nil
+}
+
+// Convert_v1beta1_LastError_To_core_LastError is an autogenerated conversion function.
+func Convert_v1beta1_LastError_To_core_LastError(in *LastError, out *core.LastError, s conversion.Scope) error {
+ return autoConvert_v1beta1_LastError_To_core_LastError(in, out, s)
+}
+
+func autoConvert_core_LastError_To_v1beta1_LastError(in *core.LastError, out *LastError, s conversion.Scope) error {
+ out.Description = in.Description
+ out.TaskID = (*string)(unsafe.Pointer(in.TaskID))
+ out.Codes = *(*[]ErrorCode)(unsafe.Pointer(&in.Codes))
+ out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime))
+ return nil
+}
+
+// Convert_core_LastError_To_v1beta1_LastError is an autogenerated conversion function.
+func Convert_core_LastError_To_v1beta1_LastError(in *core.LastError, out *LastError, s conversion.Scope) error {
+ return autoConvert_core_LastError_To_v1beta1_LastError(in, out, s)
+}
+
+func autoConvert_v1beta1_LastOperation_To_core_LastOperation(in *LastOperation, out *core.LastOperation, s conversion.Scope) error {
+ out.Description = in.Description
+ out.LastUpdateTime = in.LastUpdateTime
+ out.Progress = in.Progress
+ out.State = core.LastOperationState(in.State)
+ out.Type = core.LastOperationType(in.Type)
+ return nil
+}
+
+// Convert_v1beta1_LastOperation_To_core_LastOperation is an autogenerated conversion function.
+func Convert_v1beta1_LastOperation_To_core_LastOperation(in *LastOperation, out *core.LastOperation, s conversion.Scope) error {
+ return autoConvert_v1beta1_LastOperation_To_core_LastOperation(in, out, s)
+}
+
+func autoConvert_core_LastOperation_To_v1beta1_LastOperation(in *core.LastOperation, out *LastOperation, s conversion.Scope) error {
+ out.Description = in.Description
+ out.LastUpdateTime = in.LastUpdateTime
+ out.Progress = in.Progress
+ out.State = LastOperationState(in.State)
+ out.Type = LastOperationType(in.Type)
+ return nil
+}
+
+// Convert_core_LastOperation_To_v1beta1_LastOperation is an autogenerated conversion function.
+func Convert_core_LastOperation_To_v1beta1_LastOperation(in *core.LastOperation, out *LastOperation, s conversion.Scope) error {
+ return autoConvert_core_LastOperation_To_v1beta1_LastOperation(in, out, s)
+}
+
+func autoConvert_v1beta1_Machine_To_core_Machine(in *Machine, out *core.Machine, s conversion.Scope) error {
+ out.Type = in.Type
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(core.ShootMachineImage)
+ if err := Convert_v1beta1_ShootMachineImage_To_core_ShootMachineImage(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Image = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_Machine_To_core_Machine is an autogenerated conversion function.
+func Convert_v1beta1_Machine_To_core_Machine(in *Machine, out *core.Machine, s conversion.Scope) error {
+ return autoConvert_v1beta1_Machine_To_core_Machine(in, out, s)
+}
+
+func autoConvert_core_Machine_To_v1beta1_Machine(in *core.Machine, out *Machine, s conversion.Scope) error {
+ out.Type = in.Type
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(ShootMachineImage)
+ if err := Convert_core_ShootMachineImage_To_v1beta1_ShootMachineImage(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Image = nil
+ }
+ return nil
+}
+
+// Convert_core_Machine_To_v1beta1_Machine is an autogenerated conversion function.
+func Convert_core_Machine_To_v1beta1_Machine(in *core.Machine, out *Machine, s conversion.Scope) error {
+ return autoConvert_core_Machine_To_v1beta1_Machine(in, out, s)
+}
+
+func autoConvert_v1beta1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(in *MachineControllerManagerSettings, out *core.MachineControllerManagerSettings, s conversion.Scope) error {
+ out.MachineDrainTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineDrainTimeout))
+ out.MachineHealthTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineHealthTimeout))
+ out.MachineCreationTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineCreationTimeout))
+ out.MaxEvictRetries = (*int32)(unsafe.Pointer(in.MaxEvictRetries))
+ out.NodeConditions = *(*[]string)(unsafe.Pointer(&in.NodeConditions))
+ return nil
+}
+
+// Convert_v1beta1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings is an autogenerated conversion function.
+func Convert_v1beta1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(in *MachineControllerManagerSettings, out *core.MachineControllerManagerSettings, s conversion.Scope) error {
+ return autoConvert_v1beta1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(in, out, s)
+}
+
+func autoConvert_core_MachineControllerManagerSettings_To_v1beta1_MachineControllerManagerSettings(in *core.MachineControllerManagerSettings, out *MachineControllerManagerSettings, s conversion.Scope) error {
+ out.MachineDrainTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineDrainTimeout))
+ out.MachineHealthTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineHealthTimeout))
+ out.MachineCreationTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineCreationTimeout))
+ out.MaxEvictRetries = (*int32)(unsafe.Pointer(in.MaxEvictRetries))
+ out.NodeConditions = *(*[]string)(unsafe.Pointer(&in.NodeConditions))
+ return nil
+}
+
+// Convert_core_MachineControllerManagerSettings_To_v1beta1_MachineControllerManagerSettings is an autogenerated conversion function.
+func Convert_core_MachineControllerManagerSettings_To_v1beta1_MachineControllerManagerSettings(in *core.MachineControllerManagerSettings, out *MachineControllerManagerSettings, s conversion.Scope) error {
+ return autoConvert_core_MachineControllerManagerSettings_To_v1beta1_MachineControllerManagerSettings(in, out, s)
+}
+
+func autoConvert_v1beta1_MachineImage_To_core_MachineImage(in *MachineImage, out *core.MachineImage, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Versions = *(*[]core.MachineImageVersion)(unsafe.Pointer(&in.Versions))
+ return nil
+}
+
+// Convert_v1beta1_MachineImage_To_core_MachineImage is an autogenerated conversion function.
+func Convert_v1beta1_MachineImage_To_core_MachineImage(in *MachineImage, out *core.MachineImage, s conversion.Scope) error {
+ return autoConvert_v1beta1_MachineImage_To_core_MachineImage(in, out, s)
+}
+
+func autoConvert_core_MachineImage_To_v1beta1_MachineImage(in *core.MachineImage, out *MachineImage, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Versions = *(*[]MachineImageVersion)(unsafe.Pointer(&in.Versions))
+ return nil
+}
+
+// Convert_core_MachineImage_To_v1beta1_MachineImage is an autogenerated conversion function.
+func Convert_core_MachineImage_To_v1beta1_MachineImage(in *core.MachineImage, out *MachineImage, s conversion.Scope) error {
+ return autoConvert_core_MachineImage_To_v1beta1_MachineImage(in, out, s)
+}
+
+func autoConvert_v1beta1_MachineImageVersion_To_core_MachineImageVersion(in *MachineImageVersion, out *core.MachineImageVersion, s conversion.Scope) error {
+ if err := Convert_v1beta1_ExpirableVersion_To_core_ExpirableVersion(&in.ExpirableVersion, &out.ExpirableVersion, s); err != nil {
+ return err
+ }
+ out.CRI = *(*[]core.CRI)(unsafe.Pointer(&in.CRI))
+ return nil
+}
+
+// Convert_v1beta1_MachineImageVersion_To_core_MachineImageVersion is an autogenerated conversion function.
+func Convert_v1beta1_MachineImageVersion_To_core_MachineImageVersion(in *MachineImageVersion, out *core.MachineImageVersion, s conversion.Scope) error {
+ return autoConvert_v1beta1_MachineImageVersion_To_core_MachineImageVersion(in, out, s)
+}
+
+func autoConvert_core_MachineImageVersion_To_v1beta1_MachineImageVersion(in *core.MachineImageVersion, out *MachineImageVersion, s conversion.Scope) error {
+ if err := Convert_core_ExpirableVersion_To_v1beta1_ExpirableVersion(&in.ExpirableVersion, &out.ExpirableVersion, s); err != nil {
+ return err
+ }
+ out.CRI = *(*[]CRI)(unsafe.Pointer(&in.CRI))
+ return nil
+}
+
+// Convert_core_MachineImageVersion_To_v1beta1_MachineImageVersion is an autogenerated conversion function.
+func Convert_core_MachineImageVersion_To_v1beta1_MachineImageVersion(in *core.MachineImageVersion, out *MachineImageVersion, s conversion.Scope) error {
+ return autoConvert_core_MachineImageVersion_To_v1beta1_MachineImageVersion(in, out, s)
+}
+
+func autoConvert_v1beta1_MachineType_To_core_MachineType(in *MachineType, out *core.MachineType, s conversion.Scope) error {
+ out.CPU = in.CPU
+ out.GPU = in.GPU
+ out.Memory = in.Memory
+ out.Name = in.Name
+ out.Storage = (*core.MachineTypeStorage)(unsafe.Pointer(in.Storage))
+ out.Usable = (*bool)(unsafe.Pointer(in.Usable))
+ return nil
+}
+
+// Convert_v1beta1_MachineType_To_core_MachineType is an autogenerated conversion function.
+func Convert_v1beta1_MachineType_To_core_MachineType(in *MachineType, out *core.MachineType, s conversion.Scope) error {
+ return autoConvert_v1beta1_MachineType_To_core_MachineType(in, out, s)
+}
+
+func autoConvert_core_MachineType_To_v1beta1_MachineType(in *core.MachineType, out *MachineType, s conversion.Scope) error {
+ out.CPU = in.CPU
+ out.GPU = in.GPU
+ out.Memory = in.Memory
+ out.Name = in.Name
+ out.Storage = (*MachineTypeStorage)(unsafe.Pointer(in.Storage))
+ out.Usable = (*bool)(unsafe.Pointer(in.Usable))
+ return nil
+}
+
+// Convert_core_MachineType_To_v1beta1_MachineType is an autogenerated conversion function.
+func Convert_core_MachineType_To_v1beta1_MachineType(in *core.MachineType, out *MachineType, s conversion.Scope) error {
+ return autoConvert_core_MachineType_To_v1beta1_MachineType(in, out, s)
+}
+
+func autoConvert_v1beta1_MachineTypeStorage_To_core_MachineTypeStorage(in *MachineTypeStorage, out *core.MachineTypeStorage, s conversion.Scope) error {
+ out.Class = in.Class
+ out.StorageSize = in.StorageSize
+ out.Type = in.Type
+ return nil
+}
+
+// Convert_v1beta1_MachineTypeStorage_To_core_MachineTypeStorage is an autogenerated conversion function.
+func Convert_v1beta1_MachineTypeStorage_To_core_MachineTypeStorage(in *MachineTypeStorage, out *core.MachineTypeStorage, s conversion.Scope) error {
+ return autoConvert_v1beta1_MachineTypeStorage_To_core_MachineTypeStorage(in, out, s)
+}
+
+func autoConvert_core_MachineTypeStorage_To_v1beta1_MachineTypeStorage(in *core.MachineTypeStorage, out *MachineTypeStorage, s conversion.Scope) error {
+ out.Class = in.Class
+ out.StorageSize = in.StorageSize
+ out.Type = in.Type
+ return nil
+}
+
+// Convert_core_MachineTypeStorage_To_v1beta1_MachineTypeStorage is an autogenerated conversion function.
+func Convert_core_MachineTypeStorage_To_v1beta1_MachineTypeStorage(in *core.MachineTypeStorage, out *MachineTypeStorage, s conversion.Scope) error {
+ return autoConvert_core_MachineTypeStorage_To_v1beta1_MachineTypeStorage(in, out, s)
+}
+
+func autoConvert_v1beta1_Maintenance_To_core_Maintenance(in *Maintenance, out *core.Maintenance, s conversion.Scope) error {
+ out.AutoUpdate = (*core.MaintenanceAutoUpdate)(unsafe.Pointer(in.AutoUpdate))
+ out.TimeWindow = (*core.MaintenanceTimeWindow)(unsafe.Pointer(in.TimeWindow))
+ out.ConfineSpecUpdateRollout = (*bool)(unsafe.Pointer(in.ConfineSpecUpdateRollout))
+ return nil
+}
+
+// Convert_v1beta1_Maintenance_To_core_Maintenance is an autogenerated conversion function.
+func Convert_v1beta1_Maintenance_To_core_Maintenance(in *Maintenance, out *core.Maintenance, s conversion.Scope) error {
+ return autoConvert_v1beta1_Maintenance_To_core_Maintenance(in, out, s)
+}
+
+func autoConvert_core_Maintenance_To_v1beta1_Maintenance(in *core.Maintenance, out *Maintenance, s conversion.Scope) error {
+ out.AutoUpdate = (*MaintenanceAutoUpdate)(unsafe.Pointer(in.AutoUpdate))
+ out.TimeWindow = (*MaintenanceTimeWindow)(unsafe.Pointer(in.TimeWindow))
+ out.ConfineSpecUpdateRollout = (*bool)(unsafe.Pointer(in.ConfineSpecUpdateRollout))
+ return nil
+}
+
+// Convert_core_Maintenance_To_v1beta1_Maintenance is an autogenerated conversion function.
+func Convert_core_Maintenance_To_v1beta1_Maintenance(in *core.Maintenance, out *Maintenance, s conversion.Scope) error {
+ return autoConvert_core_Maintenance_To_v1beta1_Maintenance(in, out, s)
+}
+
+func autoConvert_v1beta1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(in *MaintenanceAutoUpdate, out *core.MaintenanceAutoUpdate, s conversion.Scope) error {
+ out.KubernetesVersion = in.KubernetesVersion
+ out.MachineImageVersion = in.MachineImageVersion
+ return nil
+}
+
+// Convert_v1beta1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate is an autogenerated conversion function.
+func Convert_v1beta1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(in *MaintenanceAutoUpdate, out *core.MaintenanceAutoUpdate, s conversion.Scope) error {
+ return autoConvert_v1beta1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(in, out, s)
+}
+
+func autoConvert_core_MaintenanceAutoUpdate_To_v1beta1_MaintenanceAutoUpdate(in *core.MaintenanceAutoUpdate, out *MaintenanceAutoUpdate, s conversion.Scope) error {
+ out.KubernetesVersion = in.KubernetesVersion
+ out.MachineImageVersion = in.MachineImageVersion
+ return nil
+}
+
+// Convert_core_MaintenanceAutoUpdate_To_v1beta1_MaintenanceAutoUpdate is an autogenerated conversion function.
+func Convert_core_MaintenanceAutoUpdate_To_v1beta1_MaintenanceAutoUpdate(in *core.MaintenanceAutoUpdate, out *MaintenanceAutoUpdate, s conversion.Scope) error {
+ return autoConvert_core_MaintenanceAutoUpdate_To_v1beta1_MaintenanceAutoUpdate(in, out, s)
+}
+
+func autoConvert_v1beta1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(in *MaintenanceTimeWindow, out *core.MaintenanceTimeWindow, s conversion.Scope) error {
+ out.Begin = in.Begin
+ out.End = in.End
+ return nil
+}
+
+// Convert_v1beta1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow is an autogenerated conversion function.
+func Convert_v1beta1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(in *MaintenanceTimeWindow, out *core.MaintenanceTimeWindow, s conversion.Scope) error {
+ return autoConvert_v1beta1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(in, out, s)
+}
+
+func autoConvert_core_MaintenanceTimeWindow_To_v1beta1_MaintenanceTimeWindow(in *core.MaintenanceTimeWindow, out *MaintenanceTimeWindow, s conversion.Scope) error {
+ out.Begin = in.Begin
+ out.End = in.End
+ return nil
+}
+
+// Convert_core_MaintenanceTimeWindow_To_v1beta1_MaintenanceTimeWindow is an autogenerated conversion function.
+func Convert_core_MaintenanceTimeWindow_To_v1beta1_MaintenanceTimeWindow(in *core.MaintenanceTimeWindow, out *MaintenanceTimeWindow, s conversion.Scope) error {
+ return autoConvert_core_MaintenanceTimeWindow_To_v1beta1_MaintenanceTimeWindow(in, out, s)
+}
+
+func autoConvert_v1beta1_Monitoring_To_core_Monitoring(in *Monitoring, out *core.Monitoring, s conversion.Scope) error {
+ out.Alerting = (*core.Alerting)(unsafe.Pointer(in.Alerting))
+ return nil
+}
+
+// Convert_v1beta1_Monitoring_To_core_Monitoring is an autogenerated conversion function.
+func Convert_v1beta1_Monitoring_To_core_Monitoring(in *Monitoring, out *core.Monitoring, s conversion.Scope) error {
+ return autoConvert_v1beta1_Monitoring_To_core_Monitoring(in, out, s)
+}
+
+func autoConvert_core_Monitoring_To_v1beta1_Monitoring(in *core.Monitoring, out *Monitoring, s conversion.Scope) error {
+ out.Alerting = (*Alerting)(unsafe.Pointer(in.Alerting))
+ return nil
+}
+
+// Convert_core_Monitoring_To_v1beta1_Monitoring is an autogenerated conversion function.
+func Convert_core_Monitoring_To_v1beta1_Monitoring(in *core.Monitoring, out *Monitoring, s conversion.Scope) error {
+ return autoConvert_core_Monitoring_To_v1beta1_Monitoring(in, out, s)
+}
+
+func autoConvert_v1beta1_NamedResourceReference_To_core_NamedResourceReference(in *NamedResourceReference, out *core.NamedResourceReference, s conversion.Scope) error {
+ out.Name = in.Name
+ out.ResourceRef = in.ResourceRef
+ return nil
+}
+
+// Convert_v1beta1_NamedResourceReference_To_core_NamedResourceReference is an autogenerated conversion function.
+func Convert_v1beta1_NamedResourceReference_To_core_NamedResourceReference(in *NamedResourceReference, out *core.NamedResourceReference, s conversion.Scope) error {
+ return autoConvert_v1beta1_NamedResourceReference_To_core_NamedResourceReference(in, out, s)
+}
+
+func autoConvert_core_NamedResourceReference_To_v1beta1_NamedResourceReference(in *core.NamedResourceReference, out *NamedResourceReference, s conversion.Scope) error {
+ out.Name = in.Name
+ out.ResourceRef = in.ResourceRef
+ return nil
+}
+
+// Convert_core_NamedResourceReference_To_v1beta1_NamedResourceReference is an autogenerated conversion function.
+func Convert_core_NamedResourceReference_To_v1beta1_NamedResourceReference(in *core.NamedResourceReference, out *NamedResourceReference, s conversion.Scope) error {
+ return autoConvert_core_NamedResourceReference_To_v1beta1_NamedResourceReference(in, out, s)
+}
+
+func autoConvert_v1beta1_Networking_To_core_Networking(in *Networking, out *core.Networking, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Pods = (*string)(unsafe.Pointer(in.Pods))
+ out.Nodes = (*string)(unsafe.Pointer(in.Nodes))
+ out.Services = (*string)(unsafe.Pointer(in.Services))
+ return nil
+}
+
+// Convert_v1beta1_Networking_To_core_Networking is an autogenerated conversion function.
+func Convert_v1beta1_Networking_To_core_Networking(in *Networking, out *core.Networking, s conversion.Scope) error {
+ return autoConvert_v1beta1_Networking_To_core_Networking(in, out, s)
+}
+
+func autoConvert_core_Networking_To_v1beta1_Networking(in *core.Networking, out *Networking, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Pods = (*string)(unsafe.Pointer(in.Pods))
+ out.Nodes = (*string)(unsafe.Pointer(in.Nodes))
+ out.Services = (*string)(unsafe.Pointer(in.Services))
+ return nil
+}
+
+// Convert_core_Networking_To_v1beta1_Networking is an autogenerated conversion function.
+func Convert_core_Networking_To_v1beta1_Networking(in *core.Networking, out *Networking, s conversion.Scope) error {
+ return autoConvert_core_Networking_To_v1beta1_Networking(in, out, s)
+}
+
+func autoConvert_v1beta1_NginxIngress_To_core_NginxIngress(in *NginxIngress, out *core.NginxIngress, s conversion.Scope) error {
+ if err := Convert_v1beta1_Addon_To_core_Addon(&in.Addon, &out.Addon, s); err != nil {
+ return err
+ }
+ out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges))
+ out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config))
+ out.ExternalTrafficPolicy = (*v1.ServiceExternalTrafficPolicyType)(unsafe.Pointer(in.ExternalTrafficPolicy))
+ return nil
+}
+
+// Convert_v1beta1_NginxIngress_To_core_NginxIngress is an autogenerated conversion function.
+func Convert_v1beta1_NginxIngress_To_core_NginxIngress(in *NginxIngress, out *core.NginxIngress, s conversion.Scope) error {
+ return autoConvert_v1beta1_NginxIngress_To_core_NginxIngress(in, out, s)
+}
+
+func autoConvert_core_NginxIngress_To_v1beta1_NginxIngress(in *core.NginxIngress, out *NginxIngress, s conversion.Scope) error {
+ if err := Convert_core_Addon_To_v1beta1_Addon(&in.Addon, &out.Addon, s); err != nil {
+ return err
+ }
+ out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges))
+ out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config))
+ out.ExternalTrafficPolicy = (*v1.ServiceExternalTrafficPolicyType)(unsafe.Pointer(in.ExternalTrafficPolicy))
+ return nil
+}
+
+// Convert_core_NginxIngress_To_v1beta1_NginxIngress is an autogenerated conversion function.
+func Convert_core_NginxIngress_To_v1beta1_NginxIngress(in *core.NginxIngress, out *NginxIngress, s conversion.Scope) error {
+ return autoConvert_core_NginxIngress_To_v1beta1_NginxIngress(in, out, s)
+}
+
+func autoConvert_v1beta1_OIDCConfig_To_core_OIDCConfig(in *OIDCConfig, out *core.OIDCConfig, s conversion.Scope) error {
+ out.CABundle = (*string)(unsafe.Pointer(in.CABundle))
+ out.ClientAuthentication = (*core.OpenIDConnectClientAuthentication)(unsafe.Pointer(in.ClientAuthentication))
+ out.ClientID = (*string)(unsafe.Pointer(in.ClientID))
+ out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim))
+ out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix))
+ out.IssuerURL = (*string)(unsafe.Pointer(in.IssuerURL))
+ out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims))
+ out.SigningAlgs = *(*[]string)(unsafe.Pointer(&in.SigningAlgs))
+ out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim))
+ out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix))
+ return nil
+}
+
+// Convert_v1beta1_OIDCConfig_To_core_OIDCConfig is an autogenerated conversion function.
+func Convert_v1beta1_OIDCConfig_To_core_OIDCConfig(in *OIDCConfig, out *core.OIDCConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_OIDCConfig_To_core_OIDCConfig(in, out, s)
+}
+
+func autoConvert_core_OIDCConfig_To_v1beta1_OIDCConfig(in *core.OIDCConfig, out *OIDCConfig, s conversion.Scope) error {
+ out.CABundle = (*string)(unsafe.Pointer(in.CABundle))
+ out.ClientAuthentication = (*OpenIDConnectClientAuthentication)(unsafe.Pointer(in.ClientAuthentication))
+ out.ClientID = (*string)(unsafe.Pointer(in.ClientID))
+ out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim))
+ out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix))
+ out.IssuerURL = (*string)(unsafe.Pointer(in.IssuerURL))
+ out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims))
+ out.SigningAlgs = *(*[]string)(unsafe.Pointer(&in.SigningAlgs))
+ out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim))
+ out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix))
+ return nil
+}
+
+// Convert_core_OIDCConfig_To_v1beta1_OIDCConfig is an autogenerated conversion function.
+func Convert_core_OIDCConfig_To_v1beta1_OIDCConfig(in *core.OIDCConfig, out *OIDCConfig, s conversion.Scope) error {
+ return autoConvert_core_OIDCConfig_To_v1beta1_OIDCConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(in *OpenIDConnectClientAuthentication, out *core.OpenIDConnectClientAuthentication, s conversion.Scope) error {
+ out.ExtraConfig = *(*map[string]string)(unsafe.Pointer(&in.ExtraConfig))
+ out.Secret = (*string)(unsafe.Pointer(in.Secret))
+ return nil
+}
+
+// Convert_v1beta1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication is an autogenerated conversion function.
+func Convert_v1beta1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(in *OpenIDConnectClientAuthentication, out *core.OpenIDConnectClientAuthentication, s conversion.Scope) error {
+ return autoConvert_v1beta1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(in, out, s)
+}
+
+func autoConvert_core_OpenIDConnectClientAuthentication_To_v1beta1_OpenIDConnectClientAuthentication(in *core.OpenIDConnectClientAuthentication, out *OpenIDConnectClientAuthentication, s conversion.Scope) error {
+ out.ExtraConfig = *(*map[string]string)(unsafe.Pointer(&in.ExtraConfig))
+ out.Secret = (*string)(unsafe.Pointer(in.Secret))
+ return nil
+}
+
+// Convert_core_OpenIDConnectClientAuthentication_To_v1beta1_OpenIDConnectClientAuthentication is an autogenerated conversion function.
+func Convert_core_OpenIDConnectClientAuthentication_To_v1beta1_OpenIDConnectClientAuthentication(in *core.OpenIDConnectClientAuthentication, out *OpenIDConnectClientAuthentication, s conversion.Scope) error {
+ return autoConvert_core_OpenIDConnectClientAuthentication_To_v1beta1_OpenIDConnectClientAuthentication(in, out, s)
+}
+
+func autoConvert_v1beta1_Plant_To_core_Plant(in *Plant, out *core.Plant, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_PlantSpec_To_core_PlantSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_PlantStatus_To_core_PlantStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_Plant_To_core_Plant is an autogenerated conversion function.
+func Convert_v1beta1_Plant_To_core_Plant(in *Plant, out *core.Plant, s conversion.Scope) error {
+ return autoConvert_v1beta1_Plant_To_core_Plant(in, out, s)
+}
+
+func autoConvert_core_Plant_To_v1beta1_Plant(in *core.Plant, out *Plant, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_PlantSpec_To_v1beta1_PlantSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_PlantStatus_To_v1beta1_PlantStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_Plant_To_v1beta1_Plant is an autogenerated conversion function.
+func Convert_core_Plant_To_v1beta1_Plant(in *core.Plant, out *Plant, s conversion.Scope) error {
+ return autoConvert_core_Plant_To_v1beta1_Plant(in, out, s)
+}
+
+func autoConvert_v1beta1_PlantList_To_core_PlantList(in *PlantList, out *core.PlantList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.Plant)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta1_PlantList_To_core_PlantList is an autogenerated conversion function.
+func Convert_v1beta1_PlantList_To_core_PlantList(in *PlantList, out *core.PlantList, s conversion.Scope) error {
+ return autoConvert_v1beta1_PlantList_To_core_PlantList(in, out, s)
+}
+
+func autoConvert_core_PlantList_To_v1beta1_PlantList(in *core.PlantList, out *PlantList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]Plant)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_PlantList_To_v1beta1_PlantList is an autogenerated conversion function.
+func Convert_core_PlantList_To_v1beta1_PlantList(in *core.PlantList, out *PlantList, s conversion.Scope) error {
+ return autoConvert_core_PlantList_To_v1beta1_PlantList(in, out, s)
+}
+
+func autoConvert_v1beta1_PlantSpec_To_core_PlantSpec(in *PlantSpec, out *core.PlantSpec, s conversion.Scope) error {
+ out.SecretRef = in.SecretRef
+ out.Endpoints = *(*[]core.Endpoint)(unsafe.Pointer(&in.Endpoints))
+ return nil
+}
+
+// Convert_v1beta1_PlantSpec_To_core_PlantSpec is an autogenerated conversion function.
+func Convert_v1beta1_PlantSpec_To_core_PlantSpec(in *PlantSpec, out *core.PlantSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_PlantSpec_To_core_PlantSpec(in, out, s)
+}
+
+func autoConvert_core_PlantSpec_To_v1beta1_PlantSpec(in *core.PlantSpec, out *PlantSpec, s conversion.Scope) error {
+ out.SecretRef = in.SecretRef
+ out.Endpoints = *(*[]Endpoint)(unsafe.Pointer(&in.Endpoints))
+ return nil
+}
+
+// Convert_core_PlantSpec_To_v1beta1_PlantSpec is an autogenerated conversion function.
+func Convert_core_PlantSpec_To_v1beta1_PlantSpec(in *core.PlantSpec, out *PlantSpec, s conversion.Scope) error {
+ return autoConvert_core_PlantSpec_To_v1beta1_PlantSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_PlantStatus_To_core_PlantStatus(in *PlantStatus, out *core.PlantStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions))
+ out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
+ out.ClusterInfo = (*core.ClusterInfo)(unsafe.Pointer(in.ClusterInfo))
+ return nil
+}
+
+// Convert_v1beta1_PlantStatus_To_core_PlantStatus is an autogenerated conversion function.
+func Convert_v1beta1_PlantStatus_To_core_PlantStatus(in *PlantStatus, out *core.PlantStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_PlantStatus_To_core_PlantStatus(in, out, s)
+}
+
+func autoConvert_core_PlantStatus_To_v1beta1_PlantStatus(in *core.PlantStatus, out *PlantStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions))
+ out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
+ out.ClusterInfo = (*ClusterInfo)(unsafe.Pointer(in.ClusterInfo))
+ return nil
+}
+
+// Convert_core_PlantStatus_To_v1beta1_PlantStatus is an autogenerated conversion function.
+func Convert_core_PlantStatus_To_v1beta1_PlantStatus(in *core.PlantStatus, out *PlantStatus, s conversion.Scope) error {
+ return autoConvert_core_PlantStatus_To_v1beta1_PlantStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_Project_To_core_Project(in *Project, out *core.Project, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_ProjectSpec_To_core_ProjectSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_ProjectStatus_To_core_ProjectStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_Project_To_core_Project is an autogenerated conversion function.
+func Convert_v1beta1_Project_To_core_Project(in *Project, out *core.Project, s conversion.Scope) error {
+ return autoConvert_v1beta1_Project_To_core_Project(in, out, s)
+}
+
+func autoConvert_core_Project_To_v1beta1_Project(in *core.Project, out *Project, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_ProjectSpec_To_v1beta1_ProjectSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_ProjectStatus_To_v1beta1_ProjectStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_Project_To_v1beta1_Project is an autogenerated conversion function.
+func Convert_core_Project_To_v1beta1_Project(in *core.Project, out *Project, s conversion.Scope) error {
+ return autoConvert_core_Project_To_v1beta1_Project(in, out, s)
+}
+
+func autoConvert_v1beta1_ProjectList_To_core_ProjectList(in *ProjectList, out *core.ProjectList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]core.Project, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_Project_To_core_Project(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_ProjectList_To_core_ProjectList is an autogenerated conversion function.
+func Convert_v1beta1_ProjectList_To_core_ProjectList(in *ProjectList, out *core.ProjectList, s conversion.Scope) error {
+ return autoConvert_v1beta1_ProjectList_To_core_ProjectList(in, out, s)
+}
+
+func autoConvert_core_ProjectList_To_v1beta1_ProjectList(in *core.ProjectList, out *ProjectList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Project, len(*in))
+ for i := range *in {
+ if err := Convert_core_Project_To_v1beta1_Project(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_core_ProjectList_To_v1beta1_ProjectList is an autogenerated conversion function.
+func Convert_core_ProjectList_To_v1beta1_ProjectList(in *core.ProjectList, out *ProjectList, s conversion.Scope) error {
+ return autoConvert_core_ProjectList_To_v1beta1_ProjectList(in, out, s)
+}
+
+func autoConvert_v1beta1_ProjectMember_To_core_ProjectMember(in *ProjectMember, out *core.ProjectMember, s conversion.Scope) error {
+ out.Subject = in.Subject
+ // WARNING: in.Role requires manual conversion: does not exist in peer-type
+ out.Roles = *(*[]string)(unsafe.Pointer(&in.Roles))
+ return nil
+}
+
+func autoConvert_core_ProjectMember_To_v1beta1_ProjectMember(in *core.ProjectMember, out *ProjectMember, s conversion.Scope) error {
+ out.Subject = in.Subject
+ out.Roles = *(*[]string)(unsafe.Pointer(&in.Roles))
+ return nil
+}
+
+func autoConvert_v1beta1_ProjectSpec_To_core_ProjectSpec(in *ProjectSpec, out *core.ProjectSpec, s conversion.Scope) error {
+ out.CreatedBy = (*rbacv1.Subject)(unsafe.Pointer(in.CreatedBy))
+ out.Description = (*string)(unsafe.Pointer(in.Description))
+ out.Owner = (*rbacv1.Subject)(unsafe.Pointer(in.Owner))
+ out.Purpose = (*string)(unsafe.Pointer(in.Purpose))
+ if in.Members != nil {
+ in, out := &in.Members, &out.Members
+ *out = make([]core.ProjectMember, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_ProjectMember_To_core_ProjectMember(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Members = nil
+ }
+ out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
+ out.Tolerations = (*core.ProjectTolerations)(unsafe.Pointer(in.Tolerations))
+ return nil
+}
+
+func autoConvert_core_ProjectSpec_To_v1beta1_ProjectSpec(in *core.ProjectSpec, out *ProjectSpec, s conversion.Scope) error {
+ out.CreatedBy = (*rbacv1.Subject)(unsafe.Pointer(in.CreatedBy))
+ out.Description = (*string)(unsafe.Pointer(in.Description))
+ out.Owner = (*rbacv1.Subject)(unsafe.Pointer(in.Owner))
+ out.Purpose = (*string)(unsafe.Pointer(in.Purpose))
+ if in.Members != nil {
+ in, out := &in.Members, &out.Members
+ *out = make([]ProjectMember, len(*in))
+ for i := range *in {
+ if err := Convert_core_ProjectMember_To_v1beta1_ProjectMember(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Members = nil
+ }
+ out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
+ out.Tolerations = (*ProjectTolerations)(unsafe.Pointer(in.Tolerations))
+ return nil
+}
+
+func autoConvert_v1beta1_ProjectStatus_To_core_ProjectStatus(in *ProjectStatus, out *core.ProjectStatus, s conversion.Scope) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.Phase = core.ProjectPhase(in.Phase)
+ out.StaleSinceTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleSinceTimestamp))
+ out.StaleAutoDeleteTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleAutoDeleteTimestamp))
+ return nil
+}
+
+// Convert_v1beta1_ProjectStatus_To_core_ProjectStatus is an autogenerated conversion function.
+func Convert_v1beta1_ProjectStatus_To_core_ProjectStatus(in *ProjectStatus, out *core.ProjectStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_ProjectStatus_To_core_ProjectStatus(in, out, s)
+}
+
+func autoConvert_core_ProjectStatus_To_v1beta1_ProjectStatus(in *core.ProjectStatus, out *ProjectStatus, s conversion.Scope) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.Phase = ProjectPhase(in.Phase)
+ out.StaleSinceTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleSinceTimestamp))
+ out.StaleAutoDeleteTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleAutoDeleteTimestamp))
+ return nil
+}
+
+// Convert_core_ProjectStatus_To_v1beta1_ProjectStatus is an autogenerated conversion function.
+func Convert_core_ProjectStatus_To_v1beta1_ProjectStatus(in *core.ProjectStatus, out *ProjectStatus, s conversion.Scope) error {
+ return autoConvert_core_ProjectStatus_To_v1beta1_ProjectStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_ProjectTolerations_To_core_ProjectTolerations(in *ProjectTolerations, out *core.ProjectTolerations, s conversion.Scope) error {
+ out.Defaults = *(*[]core.Toleration)(unsafe.Pointer(&in.Defaults))
+ out.Whitelist = *(*[]core.Toleration)(unsafe.Pointer(&in.Whitelist))
+ return nil
+}
+
+// Convert_v1beta1_ProjectTolerations_To_core_ProjectTolerations is an autogenerated conversion function.
+func Convert_v1beta1_ProjectTolerations_To_core_ProjectTolerations(in *ProjectTolerations, out *core.ProjectTolerations, s conversion.Scope) error {
+ return autoConvert_v1beta1_ProjectTolerations_To_core_ProjectTolerations(in, out, s)
+}
+
+func autoConvert_core_ProjectTolerations_To_v1beta1_ProjectTolerations(in *core.ProjectTolerations, out *ProjectTolerations, s conversion.Scope) error {
+ out.Defaults = *(*[]Toleration)(unsafe.Pointer(&in.Defaults))
+ out.Whitelist = *(*[]Toleration)(unsafe.Pointer(&in.Whitelist))
+ return nil
+}
+
+// Convert_core_ProjectTolerations_To_v1beta1_ProjectTolerations is an autogenerated conversion function.
+func Convert_core_ProjectTolerations_To_v1beta1_ProjectTolerations(in *core.ProjectTolerations, out *ProjectTolerations, s conversion.Scope) error {
+ return autoConvert_core_ProjectTolerations_To_v1beta1_ProjectTolerations(in, out, s)
+}
+
+func autoConvert_v1beta1_Provider_To_core_Provider(in *Provider, out *core.Provider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ControlPlaneConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ControlPlaneConfig))
+ out.InfrastructureConfig = (*runtime.RawExtension)(unsafe.Pointer(in.InfrastructureConfig))
+ if in.Workers != nil {
+ in, out := &in.Workers, &out.Workers
+ *out = make([]core.Worker, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_Worker_To_core_Worker(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Workers = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_Provider_To_core_Provider is an autogenerated conversion function.
+func Convert_v1beta1_Provider_To_core_Provider(in *Provider, out *core.Provider, s conversion.Scope) error {
+ return autoConvert_v1beta1_Provider_To_core_Provider(in, out, s)
+}
+
+func autoConvert_core_Provider_To_v1beta1_Provider(in *core.Provider, out *Provider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ControlPlaneConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ControlPlaneConfig))
+ out.InfrastructureConfig = (*runtime.RawExtension)(unsafe.Pointer(in.InfrastructureConfig))
+ if in.Workers != nil {
+ in, out := &in.Workers, &out.Workers
+ *out = make([]Worker, len(*in))
+ for i := range *in {
+ if err := Convert_core_Worker_To_v1beta1_Worker(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Workers = nil
+ }
+ return nil
+}
+
+// Convert_core_Provider_To_v1beta1_Provider is an autogenerated conversion function.
+func Convert_core_Provider_To_v1beta1_Provider(in *core.Provider, out *Provider, s conversion.Scope) error {
+ return autoConvert_core_Provider_To_v1beta1_Provider(in, out, s)
+}
+
+func autoConvert_v1beta1_Quota_To_core_Quota(in *Quota, out *core.Quota, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_QuotaSpec_To_core_QuotaSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_Quota_To_core_Quota is an autogenerated conversion function.
+func Convert_v1beta1_Quota_To_core_Quota(in *Quota, out *core.Quota, s conversion.Scope) error {
+ return autoConvert_v1beta1_Quota_To_core_Quota(in, out, s)
+}
+
+func autoConvert_core_Quota_To_v1beta1_Quota(in *core.Quota, out *Quota, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_QuotaSpec_To_v1beta1_QuotaSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_Quota_To_v1beta1_Quota is an autogenerated conversion function.
+func Convert_core_Quota_To_v1beta1_Quota(in *core.Quota, out *Quota, s conversion.Scope) error {
+ return autoConvert_core_Quota_To_v1beta1_Quota(in, out, s)
+}
+
+func autoConvert_v1beta1_QuotaList_To_core_QuotaList(in *QuotaList, out *core.QuotaList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.Quota)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta1_QuotaList_To_core_QuotaList is an autogenerated conversion function.
+func Convert_v1beta1_QuotaList_To_core_QuotaList(in *QuotaList, out *core.QuotaList, s conversion.Scope) error {
+ return autoConvert_v1beta1_QuotaList_To_core_QuotaList(in, out, s)
+}
+
+func autoConvert_core_QuotaList_To_v1beta1_QuotaList(in *core.QuotaList, out *QuotaList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]Quota)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_QuotaList_To_v1beta1_QuotaList is an autogenerated conversion function.
+func Convert_core_QuotaList_To_v1beta1_QuotaList(in *core.QuotaList, out *QuotaList, s conversion.Scope) error {
+ return autoConvert_core_QuotaList_To_v1beta1_QuotaList(in, out, s)
+}
+
+func autoConvert_v1beta1_QuotaSpec_To_core_QuotaSpec(in *QuotaSpec, out *core.QuotaSpec, s conversion.Scope) error {
+ out.ClusterLifetimeDays = (*int32)(unsafe.Pointer(in.ClusterLifetimeDays))
+ out.Metrics = *(*v1.ResourceList)(unsafe.Pointer(&in.Metrics))
+ out.Scope = in.Scope
+ return nil
+}
+
+// Convert_v1beta1_QuotaSpec_To_core_QuotaSpec is an autogenerated conversion function.
+func Convert_v1beta1_QuotaSpec_To_core_QuotaSpec(in *QuotaSpec, out *core.QuotaSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_QuotaSpec_To_core_QuotaSpec(in, out, s)
+}
+
+func autoConvert_core_QuotaSpec_To_v1beta1_QuotaSpec(in *core.QuotaSpec, out *QuotaSpec, s conversion.Scope) error {
+ out.ClusterLifetimeDays = (*int32)(unsafe.Pointer(in.ClusterLifetimeDays))
+ out.Metrics = *(*v1.ResourceList)(unsafe.Pointer(&in.Metrics))
+ out.Scope = in.Scope
+ return nil
+}
+
+// Convert_core_QuotaSpec_To_v1beta1_QuotaSpec is an autogenerated conversion function.
+func Convert_core_QuotaSpec_To_v1beta1_QuotaSpec(in *core.QuotaSpec, out *QuotaSpec, s conversion.Scope) error {
+ return autoConvert_core_QuotaSpec_To_v1beta1_QuotaSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_Region_To_core_Region(in *Region, out *core.Region, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Zones = *(*[]core.AvailabilityZone)(unsafe.Pointer(&in.Zones))
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ return nil
+}
+
+// Convert_v1beta1_Region_To_core_Region is an autogenerated conversion function.
+func Convert_v1beta1_Region_To_core_Region(in *Region, out *core.Region, s conversion.Scope) error {
+ return autoConvert_v1beta1_Region_To_core_Region(in, out, s)
+}
+
+func autoConvert_core_Region_To_v1beta1_Region(in *core.Region, out *Region, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Zones = *(*[]AvailabilityZone)(unsafe.Pointer(&in.Zones))
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ return nil
+}
+
+// Convert_core_Region_To_v1beta1_Region is an autogenerated conversion function.
+func Convert_core_Region_To_v1beta1_Region(in *core.Region, out *Region, s conversion.Scope) error {
+ return autoConvert_core_Region_To_v1beta1_Region(in, out, s)
+}
+
+func autoConvert_v1beta1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(in *ResourceWatchCacheSize, out *core.ResourceWatchCacheSize, s conversion.Scope) error {
+ out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
+ out.Resource = in.Resource
+ out.CacheSize = in.CacheSize
+ return nil
+}
+
+// Convert_v1beta1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize is an autogenerated conversion function.
+func Convert_v1beta1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(in *ResourceWatchCacheSize, out *core.ResourceWatchCacheSize, s conversion.Scope) error {
+ return autoConvert_v1beta1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(in, out, s)
+}
+
+func autoConvert_core_ResourceWatchCacheSize_To_v1beta1_ResourceWatchCacheSize(in *core.ResourceWatchCacheSize, out *ResourceWatchCacheSize, s conversion.Scope) error {
+ out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
+ out.Resource = in.Resource
+ out.CacheSize = in.CacheSize
+ return nil
+}
+
+// Convert_core_ResourceWatchCacheSize_To_v1beta1_ResourceWatchCacheSize is an autogenerated conversion function.
+func Convert_core_ResourceWatchCacheSize_To_v1beta1_ResourceWatchCacheSize(in *core.ResourceWatchCacheSize, out *ResourceWatchCacheSize, s conversion.Scope) error {
+ return autoConvert_core_ResourceWatchCacheSize_To_v1beta1_ResourceWatchCacheSize(in, out, s)
+}
+
+func autoConvert_v1beta1_SecretBinding_To_core_SecretBinding(in *SecretBinding, out *core.SecretBinding, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ out.SecretRef = in.SecretRef
+ out.Quotas = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Quotas))
+ return nil
+}
+
+// Convert_v1beta1_SecretBinding_To_core_SecretBinding is an autogenerated conversion function.
+func Convert_v1beta1_SecretBinding_To_core_SecretBinding(in *SecretBinding, out *core.SecretBinding, s conversion.Scope) error {
+ return autoConvert_v1beta1_SecretBinding_To_core_SecretBinding(in, out, s)
+}
+
+func autoConvert_core_SecretBinding_To_v1beta1_SecretBinding(in *core.SecretBinding, out *SecretBinding, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ out.SecretRef = in.SecretRef
+ out.Quotas = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Quotas))
+ return nil
+}
+
+// Convert_core_SecretBinding_To_v1beta1_SecretBinding is an autogenerated conversion function.
+func Convert_core_SecretBinding_To_v1beta1_SecretBinding(in *core.SecretBinding, out *SecretBinding, s conversion.Scope) error {
+ return autoConvert_core_SecretBinding_To_v1beta1_SecretBinding(in, out, s)
+}
+
+func autoConvert_v1beta1_SecretBindingList_To_core_SecretBindingList(in *SecretBindingList, out *core.SecretBindingList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]core.SecretBinding)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta1_SecretBindingList_To_core_SecretBindingList is an autogenerated conversion function.
+func Convert_v1beta1_SecretBindingList_To_core_SecretBindingList(in *SecretBindingList, out *core.SecretBindingList, s conversion.Scope) error {
+ return autoConvert_v1beta1_SecretBindingList_To_core_SecretBindingList(in, out, s)
+}
+
+func autoConvert_core_SecretBindingList_To_v1beta1_SecretBindingList(in *core.SecretBindingList, out *SecretBindingList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]SecretBinding)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_core_SecretBindingList_To_v1beta1_SecretBindingList is an autogenerated conversion function.
+func Convert_core_SecretBindingList_To_v1beta1_SecretBindingList(in *core.SecretBindingList, out *SecretBindingList, s conversion.Scope) error {
+ return autoConvert_core_SecretBindingList_To_v1beta1_SecretBindingList(in, out, s)
+}
+
+func autoConvert_v1beta1_Seed_To_core_Seed(in *Seed, out *core.Seed, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_SeedSpec_To_core_SeedSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_SeedStatus_To_core_SeedStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_Seed_To_core_Seed is an autogenerated conversion function.
+func Convert_v1beta1_Seed_To_core_Seed(in *Seed, out *core.Seed, s conversion.Scope) error {
+ return autoConvert_v1beta1_Seed_To_core_Seed(in, out, s)
+}
+
+func autoConvert_core_Seed_To_v1beta1_Seed(in *core.Seed, out *Seed, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_SeedSpec_To_v1beta1_SeedSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_SeedStatus_To_v1beta1_SeedStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_Seed_To_v1beta1_Seed is an autogenerated conversion function.
+func Convert_core_Seed_To_v1beta1_Seed(in *core.Seed, out *Seed, s conversion.Scope) error {
+ return autoConvert_core_Seed_To_v1beta1_Seed(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedBackup_To_core_SeedBackup(in *SeedBackup, out *core.SeedBackup, s conversion.Scope) error {
+ out.Provider = in.Provider
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Region = (*string)(unsafe.Pointer(in.Region))
+ out.SecretRef = in.SecretRef
+ return nil
+}
+
+// Convert_v1beta1_SeedBackup_To_core_SeedBackup is an autogenerated conversion function.
+func Convert_v1beta1_SeedBackup_To_core_SeedBackup(in *SeedBackup, out *core.SeedBackup, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedBackup_To_core_SeedBackup(in, out, s)
+}
+
+func autoConvert_core_SeedBackup_To_v1beta1_SeedBackup(in *core.SeedBackup, out *SeedBackup, s conversion.Scope) error {
+ out.Provider = in.Provider
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Region = (*string)(unsafe.Pointer(in.Region))
+ out.SecretRef = in.SecretRef
+ return nil
+}
+
+// Convert_core_SeedBackup_To_v1beta1_SeedBackup is an autogenerated conversion function.
+func Convert_core_SeedBackup_To_v1beta1_SeedBackup(in *core.SeedBackup, out *SeedBackup, s conversion.Scope) error {
+ return autoConvert_core_SeedBackup_To_v1beta1_SeedBackup(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedDNS_To_core_SeedDNS(in *SeedDNS, out *core.SeedDNS, s conversion.Scope) error {
+ out.IngressDomain = (*string)(unsafe.Pointer(in.IngressDomain))
+ out.Provider = (*core.SeedDNSProvider)(unsafe.Pointer(in.Provider))
+ return nil
+}
+
+// Convert_v1beta1_SeedDNS_To_core_SeedDNS is an autogenerated conversion function.
+func Convert_v1beta1_SeedDNS_To_core_SeedDNS(in *SeedDNS, out *core.SeedDNS, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedDNS_To_core_SeedDNS(in, out, s)
+}
+
+func autoConvert_core_SeedDNS_To_v1beta1_SeedDNS(in *core.SeedDNS, out *SeedDNS, s conversion.Scope) error {
+ out.IngressDomain = (*string)(unsafe.Pointer(in.IngressDomain))
+ out.Provider = (*SeedDNSProvider)(unsafe.Pointer(in.Provider))
+ return nil
+}
+
+// Convert_core_SeedDNS_To_v1beta1_SeedDNS is an autogenerated conversion function.
+func Convert_core_SeedDNS_To_v1beta1_SeedDNS(in *core.SeedDNS, out *SeedDNS, s conversion.Scope) error {
+ return autoConvert_core_SeedDNS_To_v1beta1_SeedDNS(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedDNSProvider_To_core_SeedDNSProvider(in *SeedDNSProvider, out *core.SeedDNSProvider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.SecretRef = in.SecretRef
+ out.Domains = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Domains))
+ out.Zones = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Zones))
+ return nil
+}
+
+// Convert_v1beta1_SeedDNSProvider_To_core_SeedDNSProvider is an autogenerated conversion function.
+func Convert_v1beta1_SeedDNSProvider_To_core_SeedDNSProvider(in *SeedDNSProvider, out *core.SeedDNSProvider, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedDNSProvider_To_core_SeedDNSProvider(in, out, s)
+}
+
+func autoConvert_core_SeedDNSProvider_To_v1beta1_SeedDNSProvider(in *core.SeedDNSProvider, out *SeedDNSProvider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.SecretRef = in.SecretRef
+ out.Domains = (*DNSIncludeExclude)(unsafe.Pointer(in.Domains))
+ out.Zones = (*DNSIncludeExclude)(unsafe.Pointer(in.Zones))
+ return nil
+}
+
+// Convert_core_SeedDNSProvider_To_v1beta1_SeedDNSProvider is an autogenerated conversion function.
+func Convert_core_SeedDNSProvider_To_v1beta1_SeedDNSProvider(in *core.SeedDNSProvider, out *SeedDNSProvider, s conversion.Scope) error {
+ return autoConvert_core_SeedDNSProvider_To_v1beta1_SeedDNSProvider(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedList_To_core_SeedList(in *SeedList, out *core.SeedList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]core.Seed, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_Seed_To_core_Seed(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_SeedList_To_core_SeedList is an autogenerated conversion function.
+func Convert_v1beta1_SeedList_To_core_SeedList(in *SeedList, out *core.SeedList, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedList_To_core_SeedList(in, out, s)
+}
+
+func autoConvert_core_SeedList_To_v1beta1_SeedList(in *core.SeedList, out *SeedList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Seed, len(*in))
+ for i := range *in {
+ if err := Convert_core_Seed_To_v1beta1_Seed(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_core_SeedList_To_v1beta1_SeedList is an autogenerated conversion function.
+func Convert_core_SeedList_To_v1beta1_SeedList(in *core.SeedList, out *SeedList, s conversion.Scope) error {
+ return autoConvert_core_SeedList_To_v1beta1_SeedList(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedNetworks_To_core_SeedNetworks(in *SeedNetworks, out *core.SeedNetworks, s conversion.Scope) error {
+ out.Nodes = (*string)(unsafe.Pointer(in.Nodes))
+ out.Pods = in.Pods
+ out.Services = in.Services
+ out.ShootDefaults = (*core.ShootNetworks)(unsafe.Pointer(in.ShootDefaults))
+ out.BlockCIDRs = *(*[]string)(unsafe.Pointer(&in.BlockCIDRs))
+ return nil
+}
+
+// Convert_v1beta1_SeedNetworks_To_core_SeedNetworks is an autogenerated conversion function.
+func Convert_v1beta1_SeedNetworks_To_core_SeedNetworks(in *SeedNetworks, out *core.SeedNetworks, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedNetworks_To_core_SeedNetworks(in, out, s)
+}
+
+func autoConvert_core_SeedNetworks_To_v1beta1_SeedNetworks(in *core.SeedNetworks, out *SeedNetworks, s conversion.Scope) error {
+ out.Nodes = (*string)(unsafe.Pointer(in.Nodes))
+ out.Pods = in.Pods
+ out.Services = in.Services
+ out.ShootDefaults = (*ShootNetworks)(unsafe.Pointer(in.ShootDefaults))
+ out.BlockCIDRs = *(*[]string)(unsafe.Pointer(&in.BlockCIDRs))
+ return nil
+}
+
+// Convert_core_SeedNetworks_To_v1beta1_SeedNetworks is an autogenerated conversion function.
+func Convert_core_SeedNetworks_To_v1beta1_SeedNetworks(in *core.SeedNetworks, out *SeedNetworks, s conversion.Scope) error {
+ return autoConvert_core_SeedNetworks_To_v1beta1_SeedNetworks(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedProvider_To_core_SeedProvider(in *SeedProvider, out *core.SeedProvider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Region = in.Region
+ return nil
+}
+
+// Convert_v1beta1_SeedProvider_To_core_SeedProvider is an autogenerated conversion function.
+func Convert_v1beta1_SeedProvider_To_core_SeedProvider(in *SeedProvider, out *core.SeedProvider, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedProvider_To_core_SeedProvider(in, out, s)
+}
+
+func autoConvert_core_SeedProvider_To_v1beta1_SeedProvider(in *core.SeedProvider, out *SeedProvider, s conversion.Scope) error {
+ out.Type = in.Type
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Region = in.Region
+ return nil
+}
+
+// Convert_core_SeedProvider_To_v1beta1_SeedProvider is an autogenerated conversion function.
+func Convert_core_SeedProvider_To_v1beta1_SeedProvider(in *core.SeedProvider, out *SeedProvider, s conversion.Scope) error {
+ return autoConvert_core_SeedProvider_To_v1beta1_SeedProvider(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedSelector_To_core_SeedSelector(in *SeedSelector, out *core.SeedSelector, s conversion.Scope) error {
+ out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
+ out.ProviderTypes = *(*[]string)(unsafe.Pointer(&in.ProviderTypes))
+ return nil
+}
+
+// Convert_v1beta1_SeedSelector_To_core_SeedSelector is an autogenerated conversion function.
+func Convert_v1beta1_SeedSelector_To_core_SeedSelector(in *SeedSelector, out *core.SeedSelector, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedSelector_To_core_SeedSelector(in, out, s)
+}
+
+func autoConvert_core_SeedSelector_To_v1beta1_SeedSelector(in *core.SeedSelector, out *SeedSelector, s conversion.Scope) error {
+ out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
+ out.ProviderTypes = *(*[]string)(unsafe.Pointer(&in.ProviderTypes))
+ return nil
+}
+
+// Convert_core_SeedSelector_To_v1beta1_SeedSelector is an autogenerated conversion function.
+func Convert_core_SeedSelector_To_v1beta1_SeedSelector(in *core.SeedSelector, out *SeedSelector, s conversion.Scope) error {
+ return autoConvert_core_SeedSelector_To_v1beta1_SeedSelector(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(in *SeedSettingExcessCapacityReservation, out *core.SeedSettingExcessCapacityReservation, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_v1beta1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation is an autogenerated conversion function.
+func Convert_v1beta1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(in *SeedSettingExcessCapacityReservation, out *core.SeedSettingExcessCapacityReservation, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(in, out, s)
+}
+
+func autoConvert_core_SeedSettingExcessCapacityReservation_To_v1beta1_SeedSettingExcessCapacityReservation(in *core.SeedSettingExcessCapacityReservation, out *SeedSettingExcessCapacityReservation, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_core_SeedSettingExcessCapacityReservation_To_v1beta1_SeedSettingExcessCapacityReservation is an autogenerated conversion function.
+func Convert_core_SeedSettingExcessCapacityReservation_To_v1beta1_SeedSettingExcessCapacityReservation(in *core.SeedSettingExcessCapacityReservation, out *SeedSettingExcessCapacityReservation, s conversion.Scope) error {
+ return autoConvert_core_SeedSettingExcessCapacityReservation_To_v1beta1_SeedSettingExcessCapacityReservation(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(in *SeedSettingLoadBalancerServices, out *core.SeedSettingLoadBalancerServices, s conversion.Scope) error {
+ out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
+ return nil
+}
+
+// Convert_v1beta1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices is an autogenerated conversion function.
+func Convert_v1beta1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(in *SeedSettingLoadBalancerServices, out *core.SeedSettingLoadBalancerServices, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(in, out, s)
+}
+
+func autoConvert_core_SeedSettingLoadBalancerServices_To_v1beta1_SeedSettingLoadBalancerServices(in *core.SeedSettingLoadBalancerServices, out *SeedSettingLoadBalancerServices, s conversion.Scope) error {
+ out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
+ return nil
+}
+
+// Convert_core_SeedSettingLoadBalancerServices_To_v1beta1_SeedSettingLoadBalancerServices is an autogenerated conversion function.
+func Convert_core_SeedSettingLoadBalancerServices_To_v1beta1_SeedSettingLoadBalancerServices(in *core.SeedSettingLoadBalancerServices, out *SeedSettingLoadBalancerServices, s conversion.Scope) error {
+ return autoConvert_core_SeedSettingLoadBalancerServices_To_v1beta1_SeedSettingLoadBalancerServices(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedSettingScheduling_To_core_SeedSettingScheduling(in *SeedSettingScheduling, out *core.SeedSettingScheduling, s conversion.Scope) error {
+ out.Visible = in.Visible
+ return nil
+}
+
+// Convert_v1beta1_SeedSettingScheduling_To_core_SeedSettingScheduling is an autogenerated conversion function.
+func Convert_v1beta1_SeedSettingScheduling_To_core_SeedSettingScheduling(in *SeedSettingScheduling, out *core.SeedSettingScheduling, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedSettingScheduling_To_core_SeedSettingScheduling(in, out, s)
+}
+
+func autoConvert_core_SeedSettingScheduling_To_v1beta1_SeedSettingScheduling(in *core.SeedSettingScheduling, out *SeedSettingScheduling, s conversion.Scope) error {
+ out.Visible = in.Visible
+ return nil
+}
+
+// Convert_core_SeedSettingScheduling_To_v1beta1_SeedSettingScheduling is an autogenerated conversion function.
+func Convert_core_SeedSettingScheduling_To_v1beta1_SeedSettingScheduling(in *core.SeedSettingScheduling, out *SeedSettingScheduling, s conversion.Scope) error {
+ return autoConvert_core_SeedSettingScheduling_To_v1beta1_SeedSettingScheduling(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(in *SeedSettingShootDNS, out *core.SeedSettingShootDNS, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_v1beta1_SeedSettingShootDNS_To_core_SeedSettingShootDNS is an autogenerated conversion function.
+func Convert_v1beta1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(in *SeedSettingShootDNS, out *core.SeedSettingShootDNS, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(in, out, s)
+}
+
+func autoConvert_core_SeedSettingShootDNS_To_v1beta1_SeedSettingShootDNS(in *core.SeedSettingShootDNS, out *SeedSettingShootDNS, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_core_SeedSettingShootDNS_To_v1beta1_SeedSettingShootDNS is an autogenerated conversion function.
+func Convert_core_SeedSettingShootDNS_To_v1beta1_SeedSettingShootDNS(in *core.SeedSettingShootDNS, out *SeedSettingShootDNS, s conversion.Scope) error {
+ return autoConvert_core_SeedSettingShootDNS_To_v1beta1_SeedSettingShootDNS(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(in *SeedSettingVerticalPodAutoscaler, out *core.SeedSettingVerticalPodAutoscaler, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_v1beta1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler is an autogenerated conversion function.
+func Convert_v1beta1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(in *SeedSettingVerticalPodAutoscaler, out *core.SeedSettingVerticalPodAutoscaler, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(in, out, s)
+}
+
+func autoConvert_core_SeedSettingVerticalPodAutoscaler_To_v1beta1_SeedSettingVerticalPodAutoscaler(in *core.SeedSettingVerticalPodAutoscaler, out *SeedSettingVerticalPodAutoscaler, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ return nil
+}
+
+// Convert_core_SeedSettingVerticalPodAutoscaler_To_v1beta1_SeedSettingVerticalPodAutoscaler is an autogenerated conversion function.
+func Convert_core_SeedSettingVerticalPodAutoscaler_To_v1beta1_SeedSettingVerticalPodAutoscaler(in *core.SeedSettingVerticalPodAutoscaler, out *SeedSettingVerticalPodAutoscaler, s conversion.Scope) error {
+ return autoConvert_core_SeedSettingVerticalPodAutoscaler_To_v1beta1_SeedSettingVerticalPodAutoscaler(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedSettings_To_core_SeedSettings(in *SeedSettings, out *core.SeedSettings, s conversion.Scope) error {
+ out.ExcessCapacityReservation = (*core.SeedSettingExcessCapacityReservation)(unsafe.Pointer(in.ExcessCapacityReservation))
+ out.Scheduling = (*core.SeedSettingScheduling)(unsafe.Pointer(in.Scheduling))
+ out.ShootDNS = (*core.SeedSettingShootDNS)(unsafe.Pointer(in.ShootDNS))
+ out.LoadBalancerServices = (*core.SeedSettingLoadBalancerServices)(unsafe.Pointer(in.LoadBalancerServices))
+ out.VerticalPodAutoscaler = (*core.SeedSettingVerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler))
+ return nil
+}
+
+// Convert_v1beta1_SeedSettings_To_core_SeedSettings is an autogenerated conversion function.
+func Convert_v1beta1_SeedSettings_To_core_SeedSettings(in *SeedSettings, out *core.SeedSettings, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedSettings_To_core_SeedSettings(in, out, s)
+}
+
+func autoConvert_core_SeedSettings_To_v1beta1_SeedSettings(in *core.SeedSettings, out *SeedSettings, s conversion.Scope) error {
+ out.ExcessCapacityReservation = (*SeedSettingExcessCapacityReservation)(unsafe.Pointer(in.ExcessCapacityReservation))
+ out.Scheduling = (*SeedSettingScheduling)(unsafe.Pointer(in.Scheduling))
+ out.ShootDNS = (*SeedSettingShootDNS)(unsafe.Pointer(in.ShootDNS))
+ out.LoadBalancerServices = (*SeedSettingLoadBalancerServices)(unsafe.Pointer(in.LoadBalancerServices))
+ out.VerticalPodAutoscaler = (*SeedSettingVerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler))
+ return nil
+}
+
+// Convert_core_SeedSettings_To_v1beta1_SeedSettings is an autogenerated conversion function.
+func Convert_core_SeedSettings_To_v1beta1_SeedSettings(in *core.SeedSettings, out *SeedSettings, s conversion.Scope) error {
+ return autoConvert_core_SeedSettings_To_v1beta1_SeedSettings(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedSpec_To_core_SeedSpec(in *SeedSpec, out *core.SeedSpec, s conversion.Scope) error {
+ out.Backup = (*core.SeedBackup)(unsafe.Pointer(in.Backup))
+ if err := Convert_v1beta1_SeedDNS_To_core_SeedDNS(&in.DNS, &out.DNS, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_SeedNetworks_To_core_SeedNetworks(&in.Networks, &out.Networks, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_SeedProvider_To_core_SeedProvider(&in.Provider, &out.Provider, s); err != nil {
+ return err
+ }
+ out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef))
+ out.Taints = *(*[]core.SeedTaint)(unsafe.Pointer(&in.Taints))
+ out.Volume = (*core.SeedVolume)(unsafe.Pointer(in.Volume))
+ out.Settings = (*core.SeedSettings)(unsafe.Pointer(in.Settings))
+ out.Ingress = (*core.Ingress)(unsafe.Pointer(in.Ingress))
+ return nil
+}
+
+// Convert_v1beta1_SeedSpec_To_core_SeedSpec is an autogenerated conversion function.
+func Convert_v1beta1_SeedSpec_To_core_SeedSpec(in *SeedSpec, out *core.SeedSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedSpec_To_core_SeedSpec(in, out, s)
+}
+
+func autoConvert_core_SeedSpec_To_v1beta1_SeedSpec(in *core.SeedSpec, out *SeedSpec, s conversion.Scope) error {
+ out.Backup = (*SeedBackup)(unsafe.Pointer(in.Backup))
+ if err := Convert_core_SeedDNS_To_v1beta1_SeedDNS(&in.DNS, &out.DNS, s); err != nil {
+ return err
+ }
+ if err := Convert_core_SeedNetworks_To_v1beta1_SeedNetworks(&in.Networks, &out.Networks, s); err != nil {
+ return err
+ }
+ if err := Convert_core_SeedProvider_To_v1beta1_SeedProvider(&in.Provider, &out.Provider, s); err != nil {
+ return err
+ }
+ out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef))
+ out.Settings = (*SeedSettings)(unsafe.Pointer(in.Settings))
+ out.Taints = *(*[]SeedTaint)(unsafe.Pointer(&in.Taints))
+ out.Volume = (*SeedVolume)(unsafe.Pointer(in.Volume))
+ out.Ingress = (*Ingress)(unsafe.Pointer(in.Ingress))
+ return nil
+}
+
+// Convert_core_SeedSpec_To_v1beta1_SeedSpec is an autogenerated conversion function.
+func Convert_core_SeedSpec_To_v1beta1_SeedSpec(in *core.SeedSpec, out *SeedSpec, s conversion.Scope) error {
+ return autoConvert_core_SeedSpec_To_v1beta1_SeedSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedStatus_To_core_SeedStatus(in *SeedStatus, out *core.SeedStatus, s conversion.Scope) error {
+ out.Gardener = (*core.Gardener)(unsafe.Pointer(in.Gardener))
+ out.KubernetesVersion = (*string)(unsafe.Pointer(in.KubernetesVersion))
+ out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions))
+ out.ObservedGeneration = in.ObservedGeneration
+ out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity))
+ out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity))
+ out.Allocatable = *(*v1.ResourceList)(unsafe.Pointer(&in.Allocatable))
+ return nil
+}
+
+// Convert_v1beta1_SeedStatus_To_core_SeedStatus is an autogenerated conversion function.
+func Convert_v1beta1_SeedStatus_To_core_SeedStatus(in *SeedStatus, out *core.SeedStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedStatus_To_core_SeedStatus(in, out, s)
+}
+
+func autoConvert_core_SeedStatus_To_v1beta1_SeedStatus(in *core.SeedStatus, out *SeedStatus, s conversion.Scope) error {
+ out.Gardener = (*Gardener)(unsafe.Pointer(in.Gardener))
+ out.KubernetesVersion = (*string)(unsafe.Pointer(in.KubernetesVersion))
+ out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions))
+ out.ObservedGeneration = in.ObservedGeneration
+ out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity))
+ out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity))
+ out.Allocatable = *(*v1.ResourceList)(unsafe.Pointer(&in.Allocatable))
+ return nil
+}
+
+// Convert_core_SeedStatus_To_v1beta1_SeedStatus is an autogenerated conversion function.
+func Convert_core_SeedStatus_To_v1beta1_SeedStatus(in *core.SeedStatus, out *SeedStatus, s conversion.Scope) error {
+ return autoConvert_core_SeedStatus_To_v1beta1_SeedStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedTaint_To_core_SeedTaint(in *SeedTaint, out *core.SeedTaint, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Value = (*string)(unsafe.Pointer(in.Value))
+ return nil
+}
+
+// Convert_v1beta1_SeedTaint_To_core_SeedTaint is an autogenerated conversion function.
+func Convert_v1beta1_SeedTaint_To_core_SeedTaint(in *SeedTaint, out *core.SeedTaint, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedTaint_To_core_SeedTaint(in, out, s)
+}
+
+func autoConvert_core_SeedTaint_To_v1beta1_SeedTaint(in *core.SeedTaint, out *SeedTaint, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Value = (*string)(unsafe.Pointer(in.Value))
+ return nil
+}
+
+// Convert_core_SeedTaint_To_v1beta1_SeedTaint is an autogenerated conversion function.
+func Convert_core_SeedTaint_To_v1beta1_SeedTaint(in *core.SeedTaint, out *SeedTaint, s conversion.Scope) error {
+ return autoConvert_core_SeedTaint_To_v1beta1_SeedTaint(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedVolume_To_core_SeedVolume(in *SeedVolume, out *core.SeedVolume, s conversion.Scope) error {
+ out.MinimumSize = (*resource.Quantity)(unsafe.Pointer(in.MinimumSize))
+ out.Providers = *(*[]core.SeedVolumeProvider)(unsafe.Pointer(&in.Providers))
+ return nil
+}
+
+// Convert_v1beta1_SeedVolume_To_core_SeedVolume is an autogenerated conversion function.
+func Convert_v1beta1_SeedVolume_To_core_SeedVolume(in *SeedVolume, out *core.SeedVolume, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedVolume_To_core_SeedVolume(in, out, s)
+}
+
+func autoConvert_core_SeedVolume_To_v1beta1_SeedVolume(in *core.SeedVolume, out *SeedVolume, s conversion.Scope) error {
+ out.MinimumSize = (*resource.Quantity)(unsafe.Pointer(in.MinimumSize))
+ out.Providers = *(*[]SeedVolumeProvider)(unsafe.Pointer(&in.Providers))
+ return nil
+}
+
+// Convert_core_SeedVolume_To_v1beta1_SeedVolume is an autogenerated conversion function.
+func Convert_core_SeedVolume_To_v1beta1_SeedVolume(in *core.SeedVolume, out *SeedVolume, s conversion.Scope) error {
+ return autoConvert_core_SeedVolume_To_v1beta1_SeedVolume(in, out, s)
+}
+
+func autoConvert_v1beta1_SeedVolumeProvider_To_core_SeedVolumeProvider(in *SeedVolumeProvider, out *core.SeedVolumeProvider, s conversion.Scope) error {
+ out.Purpose = in.Purpose
+ out.Name = in.Name
+ return nil
+}
+
+// Convert_v1beta1_SeedVolumeProvider_To_core_SeedVolumeProvider is an autogenerated conversion function.
+func Convert_v1beta1_SeedVolumeProvider_To_core_SeedVolumeProvider(in *SeedVolumeProvider, out *core.SeedVolumeProvider, s conversion.Scope) error {
+ return autoConvert_v1beta1_SeedVolumeProvider_To_core_SeedVolumeProvider(in, out, s)
+}
+
+func autoConvert_core_SeedVolumeProvider_To_v1beta1_SeedVolumeProvider(in *core.SeedVolumeProvider, out *SeedVolumeProvider, s conversion.Scope) error {
+ out.Purpose = in.Purpose
+ out.Name = in.Name
+ return nil
+}
+
+// Convert_core_SeedVolumeProvider_To_v1beta1_SeedVolumeProvider is an autogenerated conversion function.
+func Convert_core_SeedVolumeProvider_To_v1beta1_SeedVolumeProvider(in *core.SeedVolumeProvider, out *SeedVolumeProvider, s conversion.Scope) error {
+ return autoConvert_core_SeedVolumeProvider_To_v1beta1_SeedVolumeProvider(in, out, s)
+}
+
+func autoConvert_v1beta1_ServiceAccountConfig_To_core_ServiceAccountConfig(in *ServiceAccountConfig, out *core.ServiceAccountConfig, s conversion.Scope) error {
+ out.Issuer = (*string)(unsafe.Pointer(in.Issuer))
+ out.SigningKeySecret = (*v1.LocalObjectReference)(unsafe.Pointer(in.SigningKeySecret))
+ return nil
+}
+
+// Convert_v1beta1_ServiceAccountConfig_To_core_ServiceAccountConfig is an autogenerated conversion function.
+func Convert_v1beta1_ServiceAccountConfig_To_core_ServiceAccountConfig(in *ServiceAccountConfig, out *core.ServiceAccountConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_ServiceAccountConfig_To_core_ServiceAccountConfig(in, out, s)
+}
+
+func autoConvert_core_ServiceAccountConfig_To_v1beta1_ServiceAccountConfig(in *core.ServiceAccountConfig, out *ServiceAccountConfig, s conversion.Scope) error {
+ out.Issuer = (*string)(unsafe.Pointer(in.Issuer))
+ out.SigningKeySecret = (*v1.LocalObjectReference)(unsafe.Pointer(in.SigningKeySecret))
+ return nil
+}
+
+// Convert_core_ServiceAccountConfig_To_v1beta1_ServiceAccountConfig is an autogenerated conversion function.
+func Convert_core_ServiceAccountConfig_To_v1beta1_ServiceAccountConfig(in *core.ServiceAccountConfig, out *ServiceAccountConfig, s conversion.Scope) error {
+ return autoConvert_core_ServiceAccountConfig_To_v1beta1_ServiceAccountConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_Shoot_To_core_Shoot(in *Shoot, out *core.Shoot, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_ShootSpec_To_core_ShootSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_ShootStatus_To_core_ShootStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_Shoot_To_core_Shoot is an autogenerated conversion function.
+func Convert_v1beta1_Shoot_To_core_Shoot(in *Shoot, out *core.Shoot, s conversion.Scope) error {
+ return autoConvert_v1beta1_Shoot_To_core_Shoot(in, out, s)
+}
+
+func autoConvert_core_Shoot_To_v1beta1_Shoot(in *core.Shoot, out *Shoot, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_core_ShootSpec_To_v1beta1_ShootSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_core_ShootStatus_To_v1beta1_ShootStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_Shoot_To_v1beta1_Shoot is an autogenerated conversion function.
+func Convert_core_Shoot_To_v1beta1_Shoot(in *core.Shoot, out *Shoot, s conversion.Scope) error {
+ return autoConvert_core_Shoot_To_v1beta1_Shoot(in, out, s)
+}
+
+func autoConvert_v1beta1_ShootList_To_core_ShootList(in *ShootList, out *core.ShootList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]core.Shoot, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_Shoot_To_core_Shoot(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_ShootList_To_core_ShootList is an autogenerated conversion function.
+func Convert_v1beta1_ShootList_To_core_ShootList(in *ShootList, out *core.ShootList, s conversion.Scope) error {
+ return autoConvert_v1beta1_ShootList_To_core_ShootList(in, out, s)
+}
+
+func autoConvert_core_ShootList_To_v1beta1_ShootList(in *core.ShootList, out *ShootList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Shoot, len(*in))
+ for i := range *in {
+ if err := Convert_core_Shoot_To_v1beta1_Shoot(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_core_ShootList_To_v1beta1_ShootList is an autogenerated conversion function.
+func Convert_core_ShootList_To_v1beta1_ShootList(in *core.ShootList, out *ShootList, s conversion.Scope) error {
+ return autoConvert_core_ShootList_To_v1beta1_ShootList(in, out, s)
+}
+
+func autoConvert_v1beta1_ShootMachineImage_To_core_ShootMachineImage(in *ShootMachineImage, out *core.ShootMachineImage, s conversion.Scope) error {
+ out.Name = in.Name
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ if err := metav1.Convert_Pointer_string_To_string(&in.Version, &out.Version, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_ShootMachineImage_To_core_ShootMachineImage is an autogenerated conversion function.
+func Convert_v1beta1_ShootMachineImage_To_core_ShootMachineImage(in *ShootMachineImage, out *core.ShootMachineImage, s conversion.Scope) error {
+ return autoConvert_v1beta1_ShootMachineImage_To_core_ShootMachineImage(in, out, s)
+}
+
+func autoConvert_core_ShootMachineImage_To_v1beta1_ShootMachineImage(in *core.ShootMachineImage, out *ShootMachineImage, s conversion.Scope) error {
+ out.Name = in.Name
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ if err := metav1.Convert_string_To_Pointer_string(&in.Version, &out.Version, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_core_ShootMachineImage_To_v1beta1_ShootMachineImage is an autogenerated conversion function.
+func Convert_core_ShootMachineImage_To_v1beta1_ShootMachineImage(in *core.ShootMachineImage, out *ShootMachineImage, s conversion.Scope) error {
+ return autoConvert_core_ShootMachineImage_To_v1beta1_ShootMachineImage(in, out, s)
+}
+
+func autoConvert_v1beta1_ShootNetworks_To_core_ShootNetworks(in *ShootNetworks, out *core.ShootNetworks, s conversion.Scope) error {
+ out.Pods = (*string)(unsafe.Pointer(in.Pods))
+ out.Services = (*string)(unsafe.Pointer(in.Services))
+ return nil
+}
+
+// Convert_v1beta1_ShootNetworks_To_core_ShootNetworks is an autogenerated conversion function.
+func Convert_v1beta1_ShootNetworks_To_core_ShootNetworks(in *ShootNetworks, out *core.ShootNetworks, s conversion.Scope) error {
+ return autoConvert_v1beta1_ShootNetworks_To_core_ShootNetworks(in, out, s)
+}
+
+func autoConvert_core_ShootNetworks_To_v1beta1_ShootNetworks(in *core.ShootNetworks, out *ShootNetworks, s conversion.Scope) error {
+ out.Pods = (*string)(unsafe.Pointer(in.Pods))
+ out.Services = (*string)(unsafe.Pointer(in.Services))
+ return nil
+}
+
+// Convert_core_ShootNetworks_To_v1beta1_ShootNetworks is an autogenerated conversion function.
+func Convert_core_ShootNetworks_To_v1beta1_ShootNetworks(in *core.ShootNetworks, out *ShootNetworks, s conversion.Scope) error {
+ return autoConvert_core_ShootNetworks_To_v1beta1_ShootNetworks(in, out, s)
+}
+
+func autoConvert_v1beta1_ShootSpec_To_core_ShootSpec(in *ShootSpec, out *core.ShootSpec, s conversion.Scope) error {
+ out.Addons = (*core.Addons)(unsafe.Pointer(in.Addons))
+ out.CloudProfileName = in.CloudProfileName
+ out.DNS = (*core.DNS)(unsafe.Pointer(in.DNS))
+ out.Extensions = *(*[]core.Extension)(unsafe.Pointer(&in.Extensions))
+ out.Hibernation = (*core.Hibernation)(unsafe.Pointer(in.Hibernation))
+ if err := Convert_v1beta1_Kubernetes_To_core_Kubernetes(&in.Kubernetes, &out.Kubernetes, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_Networking_To_core_Networking(&in.Networking, &out.Networking, s); err != nil {
+ return err
+ }
+ out.Maintenance = (*core.Maintenance)(unsafe.Pointer(in.Maintenance))
+ out.Monitoring = (*core.Monitoring)(unsafe.Pointer(in.Monitoring))
+ if err := Convert_v1beta1_Provider_To_core_Provider(&in.Provider, &out.Provider, s); err != nil {
+ return err
+ }
+ out.Purpose = (*core.ShootPurpose)(unsafe.Pointer(in.Purpose))
+ out.Region = in.Region
+ out.SecretBindingName = in.SecretBindingName
+ out.SeedName = (*string)(unsafe.Pointer(in.SeedName))
+ out.SeedSelector = (*core.SeedSelector)(unsafe.Pointer(in.SeedSelector))
+ out.Resources = *(*[]core.NamedResourceReference)(unsafe.Pointer(&in.Resources))
+ out.Tolerations = *(*[]core.Toleration)(unsafe.Pointer(&in.Tolerations))
+ return nil
+}
+
+// Convert_v1beta1_ShootSpec_To_core_ShootSpec is an autogenerated conversion function.
+func Convert_v1beta1_ShootSpec_To_core_ShootSpec(in *ShootSpec, out *core.ShootSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_ShootSpec_To_core_ShootSpec(in, out, s)
+}
+
+func autoConvert_core_ShootSpec_To_v1beta1_ShootSpec(in *core.ShootSpec, out *ShootSpec, s conversion.Scope) error {
+ out.Addons = (*Addons)(unsafe.Pointer(in.Addons))
+ out.CloudProfileName = in.CloudProfileName
+ out.DNS = (*DNS)(unsafe.Pointer(in.DNS))
+ out.Extensions = *(*[]Extension)(unsafe.Pointer(&in.Extensions))
+ out.Hibernation = (*Hibernation)(unsafe.Pointer(in.Hibernation))
+ if err := Convert_core_Kubernetes_To_v1beta1_Kubernetes(&in.Kubernetes, &out.Kubernetes, s); err != nil {
+ return err
+ }
+ if err := Convert_core_Networking_To_v1beta1_Networking(&in.Networking, &out.Networking, s); err != nil {
+ return err
+ }
+ out.Maintenance = (*Maintenance)(unsafe.Pointer(in.Maintenance))
+ out.Monitoring = (*Monitoring)(unsafe.Pointer(in.Monitoring))
+ if err := Convert_core_Provider_To_v1beta1_Provider(&in.Provider, &out.Provider, s); err != nil {
+ return err
+ }
+ out.Purpose = (*ShootPurpose)(unsafe.Pointer(in.Purpose))
+ out.Region = in.Region
+ out.SecretBindingName = in.SecretBindingName
+ out.SeedName = (*string)(unsafe.Pointer(in.SeedName))
+ out.SeedSelector = (*SeedSelector)(unsafe.Pointer(in.SeedSelector))
+ out.Resources = *(*[]NamedResourceReference)(unsafe.Pointer(&in.Resources))
+ out.Tolerations = *(*[]Toleration)(unsafe.Pointer(&in.Tolerations))
+ return nil
+}
+
+// Convert_core_ShootSpec_To_v1beta1_ShootSpec is an autogenerated conversion function.
+func Convert_core_ShootSpec_To_v1beta1_ShootSpec(in *core.ShootSpec, out *ShootSpec, s conversion.Scope) error {
+ return autoConvert_core_ShootSpec_To_v1beta1_ShootSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_ShootStatus_To_core_ShootStatus(in *ShootStatus, out *core.ShootStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions))
+ out.Constraints = *(*[]core.Condition)(unsafe.Pointer(&in.Constraints))
+ if err := Convert_v1beta1_Gardener_To_core_Gardener(&in.Gardener, &out.Gardener, s); err != nil {
+ return err
+ }
+ out.IsHibernated = in.IsHibernated
+ out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation))
+ out.LastErrors = *(*[]core.LastError)(unsafe.Pointer(&in.LastErrors))
+ out.ObservedGeneration = in.ObservedGeneration
+ out.RetryCycleStartTime = (*metav1.Time)(unsafe.Pointer(in.RetryCycleStartTime))
+ out.SeedName = (*string)(unsafe.Pointer(in.SeedName))
+ out.TechnicalID = in.TechnicalID
+ out.UID = types.UID(in.UID)
+ out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity))
+ return nil
+}
+
+// Convert_v1beta1_ShootStatus_To_core_ShootStatus is an autogenerated conversion function.
+func Convert_v1beta1_ShootStatus_To_core_ShootStatus(in *ShootStatus, out *core.ShootStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_ShootStatus_To_core_ShootStatus(in, out, s)
+}
+
+func autoConvert_core_ShootStatus_To_v1beta1_ShootStatus(in *core.ShootStatus, out *ShootStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions))
+ out.Constraints = *(*[]Condition)(unsafe.Pointer(&in.Constraints))
+ if err := Convert_core_Gardener_To_v1beta1_Gardener(&in.Gardener, &out.Gardener, s); err != nil {
+ return err
+ }
+ out.IsHibernated = in.IsHibernated
+ out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation))
+ out.LastErrors = *(*[]LastError)(unsafe.Pointer(&in.LastErrors))
+ out.ObservedGeneration = in.ObservedGeneration
+ out.RetryCycleStartTime = (*metav1.Time)(unsafe.Pointer(in.RetryCycleStartTime))
+ out.SeedName = (*string)(unsafe.Pointer(in.SeedName))
+ out.TechnicalID = in.TechnicalID
+ out.UID = types.UID(in.UID)
+ out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity))
+ return nil
+}
+
+// Convert_core_ShootStatus_To_v1beta1_ShootStatus is an autogenerated conversion function.
+func Convert_core_ShootStatus_To_v1beta1_ShootStatus(in *core.ShootStatus, out *ShootStatus, s conversion.Scope) error {
+ return autoConvert_core_ShootStatus_To_v1beta1_ShootStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_Toleration_To_core_Toleration(in *Toleration, out *core.Toleration, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Value = (*string)(unsafe.Pointer(in.Value))
+ return nil
+}
+
+// Convert_v1beta1_Toleration_To_core_Toleration is an autogenerated conversion function.
+func Convert_v1beta1_Toleration_To_core_Toleration(in *Toleration, out *core.Toleration, s conversion.Scope) error {
+ return autoConvert_v1beta1_Toleration_To_core_Toleration(in, out, s)
+}
+
+func autoConvert_core_Toleration_To_v1beta1_Toleration(in *core.Toleration, out *Toleration, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Value = (*string)(unsafe.Pointer(in.Value))
+ return nil
+}
+
+// Convert_core_Toleration_To_v1beta1_Toleration is an autogenerated conversion function.
+func Convert_core_Toleration_To_v1beta1_Toleration(in *core.Toleration, out *Toleration, s conversion.Scope) error {
+ return autoConvert_core_Toleration_To_v1beta1_Toleration(in, out, s)
+}
+
+func autoConvert_v1beta1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(in *VerticalPodAutoscaler, out *core.VerticalPodAutoscaler, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ out.EvictAfterOOMThreshold = (*metav1.Duration)(unsafe.Pointer(in.EvictAfterOOMThreshold))
+ out.EvictionRateBurst = (*int32)(unsafe.Pointer(in.EvictionRateBurst))
+ out.EvictionRateLimit = (*float64)(unsafe.Pointer(in.EvictionRateLimit))
+ out.EvictionTolerance = (*float64)(unsafe.Pointer(in.EvictionTolerance))
+ out.RecommendationMarginFraction = (*float64)(unsafe.Pointer(in.RecommendationMarginFraction))
+ out.UpdaterInterval = (*metav1.Duration)(unsafe.Pointer(in.UpdaterInterval))
+ out.RecommenderInterval = (*metav1.Duration)(unsafe.Pointer(in.RecommenderInterval))
+ return nil
+}
+
+// Convert_v1beta1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler is an autogenerated conversion function.
+func Convert_v1beta1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(in *VerticalPodAutoscaler, out *core.VerticalPodAutoscaler, s conversion.Scope) error {
+ return autoConvert_v1beta1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(in, out, s)
+}
+
+func autoConvert_core_VerticalPodAutoscaler_To_v1beta1_VerticalPodAutoscaler(in *core.VerticalPodAutoscaler, out *VerticalPodAutoscaler, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ out.EvictAfterOOMThreshold = (*metav1.Duration)(unsafe.Pointer(in.EvictAfterOOMThreshold))
+ out.EvictionRateBurst = (*int32)(unsafe.Pointer(in.EvictionRateBurst))
+ out.EvictionRateLimit = (*float64)(unsafe.Pointer(in.EvictionRateLimit))
+ out.EvictionTolerance = (*float64)(unsafe.Pointer(in.EvictionTolerance))
+ out.RecommendationMarginFraction = (*float64)(unsafe.Pointer(in.RecommendationMarginFraction))
+ out.UpdaterInterval = (*metav1.Duration)(unsafe.Pointer(in.UpdaterInterval))
+ out.RecommenderInterval = (*metav1.Duration)(unsafe.Pointer(in.RecommenderInterval))
+ return nil
+}
+
+// Convert_core_VerticalPodAutoscaler_To_v1beta1_VerticalPodAutoscaler is an autogenerated conversion function.
+func Convert_core_VerticalPodAutoscaler_To_v1beta1_VerticalPodAutoscaler(in *core.VerticalPodAutoscaler, out *VerticalPodAutoscaler, s conversion.Scope) error {
+ return autoConvert_core_VerticalPodAutoscaler_To_v1beta1_VerticalPodAutoscaler(in, out, s)
+}
+
+func autoConvert_v1beta1_Volume_To_core_Volume(in *Volume, out *core.Volume, s conversion.Scope) error {
+ out.Name = (*string)(unsafe.Pointer(in.Name))
+ out.Type = (*string)(unsafe.Pointer(in.Type))
+ out.VolumeSize = in.VolumeSize
+ out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted))
+ return nil
+}
+
+// Convert_v1beta1_Volume_To_core_Volume is an autogenerated conversion function.
+func Convert_v1beta1_Volume_To_core_Volume(in *Volume, out *core.Volume, s conversion.Scope) error {
+ return autoConvert_v1beta1_Volume_To_core_Volume(in, out, s)
+}
+
+func autoConvert_core_Volume_To_v1beta1_Volume(in *core.Volume, out *Volume, s conversion.Scope) error {
+ out.Name = (*string)(unsafe.Pointer(in.Name))
+ out.Type = (*string)(unsafe.Pointer(in.Type))
+ out.VolumeSize = in.VolumeSize
+ out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted))
+ return nil
+}
+
+// Convert_core_Volume_To_v1beta1_Volume is an autogenerated conversion function.
+func Convert_core_Volume_To_v1beta1_Volume(in *core.Volume, out *Volume, s conversion.Scope) error {
+ return autoConvert_core_Volume_To_v1beta1_Volume(in, out, s)
+}
+
+func autoConvert_v1beta1_VolumeType_To_core_VolumeType(in *VolumeType, out *core.VolumeType, s conversion.Scope) error {
+ out.Class = in.Class
+ out.Name = in.Name
+ out.Usable = (*bool)(unsafe.Pointer(in.Usable))
+ return nil
+}
+
+// Convert_v1beta1_VolumeType_To_core_VolumeType is an autogenerated conversion function.
+func Convert_v1beta1_VolumeType_To_core_VolumeType(in *VolumeType, out *core.VolumeType, s conversion.Scope) error {
+ return autoConvert_v1beta1_VolumeType_To_core_VolumeType(in, out, s)
+}
+
+func autoConvert_core_VolumeType_To_v1beta1_VolumeType(in *core.VolumeType, out *VolumeType, s conversion.Scope) error {
+ out.Class = in.Class
+ out.Name = in.Name
+ out.Usable = (*bool)(unsafe.Pointer(in.Usable))
+ return nil
+}
+
+// Convert_core_VolumeType_To_v1beta1_VolumeType is an autogenerated conversion function.
+func Convert_core_VolumeType_To_v1beta1_VolumeType(in *core.VolumeType, out *VolumeType, s conversion.Scope) error {
+ return autoConvert_core_VolumeType_To_v1beta1_VolumeType(in, out, s)
+}
+
+func autoConvert_v1beta1_WatchCacheSizes_To_core_WatchCacheSizes(in *WatchCacheSizes, out *core.WatchCacheSizes, s conversion.Scope) error {
+ out.Default = (*int32)(unsafe.Pointer(in.Default))
+ out.Resources = *(*[]core.ResourceWatchCacheSize)(unsafe.Pointer(&in.Resources))
+ return nil
+}
+
+// Convert_v1beta1_WatchCacheSizes_To_core_WatchCacheSizes is an autogenerated conversion function.
+func Convert_v1beta1_WatchCacheSizes_To_core_WatchCacheSizes(in *WatchCacheSizes, out *core.WatchCacheSizes, s conversion.Scope) error {
+ return autoConvert_v1beta1_WatchCacheSizes_To_core_WatchCacheSizes(in, out, s)
+}
+
+func autoConvert_core_WatchCacheSizes_To_v1beta1_WatchCacheSizes(in *core.WatchCacheSizes, out *WatchCacheSizes, s conversion.Scope) error {
+ out.Default = (*int32)(unsafe.Pointer(in.Default))
+ out.Resources = *(*[]ResourceWatchCacheSize)(unsafe.Pointer(&in.Resources))
+ return nil
+}
+
+// Convert_core_WatchCacheSizes_To_v1beta1_WatchCacheSizes is an autogenerated conversion function.
+func Convert_core_WatchCacheSizes_To_v1beta1_WatchCacheSizes(in *core.WatchCacheSizes, out *WatchCacheSizes, s conversion.Scope) error {
+ return autoConvert_core_WatchCacheSizes_To_v1beta1_WatchCacheSizes(in, out, s)
+}
+
+func autoConvert_v1beta1_Worker_To_core_Worker(in *Worker, out *core.Worker, s conversion.Scope) error {
+ out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
+ out.CABundle = (*string)(unsafe.Pointer(in.CABundle))
+ out.CRI = (*core.CRI)(unsafe.Pointer(in.CRI))
+ out.Kubernetes = (*core.WorkerKubernetes)(unsafe.Pointer(in.Kubernetes))
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ out.Name = in.Name
+ if err := Convert_v1beta1_Machine_To_core_Machine(&in.Machine, &out.Machine, s); err != nil {
+ return err
+ }
+ out.Maximum = in.Maximum
+ out.Minimum = in.Minimum
+ out.MaxSurge = (*intstr.IntOrString)(unsafe.Pointer(in.MaxSurge))
+ out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints))
+ out.Volume = (*core.Volume)(unsafe.Pointer(in.Volume))
+ out.DataVolumes = *(*[]core.DataVolume)(unsafe.Pointer(&in.DataVolumes))
+ out.KubeletDataVolumeName = (*string)(unsafe.Pointer(in.KubeletDataVolumeName))
+ out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones))
+ out.SystemComponents = (*core.WorkerSystemComponents)(unsafe.Pointer(in.SystemComponents))
+ out.MachineControllerManagerSettings = (*core.MachineControllerManagerSettings)(unsafe.Pointer(in.MachineControllerManagerSettings))
+ return nil
+}
+
+// Convert_v1beta1_Worker_To_core_Worker is an autogenerated conversion function.
+func Convert_v1beta1_Worker_To_core_Worker(in *Worker, out *core.Worker, s conversion.Scope) error {
+ return autoConvert_v1beta1_Worker_To_core_Worker(in, out, s)
+}
+
+func autoConvert_core_Worker_To_v1beta1_Worker(in *core.Worker, out *Worker, s conversion.Scope) error {
+ out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
+ out.CABundle = (*string)(unsafe.Pointer(in.CABundle))
+ out.CRI = (*CRI)(unsafe.Pointer(in.CRI))
+ out.Kubernetes = (*WorkerKubernetes)(unsafe.Pointer(in.Kubernetes))
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ out.Name = in.Name
+ if err := Convert_core_Machine_To_v1beta1_Machine(&in.Machine, &out.Machine, s); err != nil {
+ return err
+ }
+ out.Maximum = in.Maximum
+ out.Minimum = in.Minimum
+ out.MaxSurge = (*intstr.IntOrString)(unsafe.Pointer(in.MaxSurge))
+ out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
+ out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig))
+ out.SystemComponents = (*WorkerSystemComponents)(unsafe.Pointer(in.SystemComponents))
+ out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints))
+ out.Volume = (*Volume)(unsafe.Pointer(in.Volume))
+ out.DataVolumes = *(*[]DataVolume)(unsafe.Pointer(&in.DataVolumes))
+ out.KubeletDataVolumeName = (*string)(unsafe.Pointer(in.KubeletDataVolumeName))
+ out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones))
+ out.MachineControllerManagerSettings = (*MachineControllerManagerSettings)(unsafe.Pointer(in.MachineControllerManagerSettings))
+ return nil
+}
+
+// Convert_core_Worker_To_v1beta1_Worker is an autogenerated conversion function.
+func Convert_core_Worker_To_v1beta1_Worker(in *core.Worker, out *Worker, s conversion.Scope) error {
+ return autoConvert_core_Worker_To_v1beta1_Worker(in, out, s)
+}
+
+func autoConvert_v1beta1_WorkerKubernetes_To_core_WorkerKubernetes(in *WorkerKubernetes, out *core.WorkerKubernetes, s conversion.Scope) error {
+ out.Kubelet = (*core.KubeletConfig)(unsafe.Pointer(in.Kubelet))
+ return nil
+}
+
+// Convert_v1beta1_WorkerKubernetes_To_core_WorkerKubernetes is an autogenerated conversion function.
+func Convert_v1beta1_WorkerKubernetes_To_core_WorkerKubernetes(in *WorkerKubernetes, out *core.WorkerKubernetes, s conversion.Scope) error {
+ return autoConvert_v1beta1_WorkerKubernetes_To_core_WorkerKubernetes(in, out, s)
+}
+
+func autoConvert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes(in *core.WorkerKubernetes, out *WorkerKubernetes, s conversion.Scope) error {
+ out.Kubelet = (*KubeletConfig)(unsafe.Pointer(in.Kubelet))
+ return nil
+}
+
+// Convert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes is an autogenerated conversion function.
+func Convert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes(in *core.WorkerKubernetes, out *WorkerKubernetes, s conversion.Scope) error {
+ return autoConvert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes(in, out, s)
+}
+
+func autoConvert_v1beta1_WorkerSystemComponents_To_core_WorkerSystemComponents(in *WorkerSystemComponents, out *core.WorkerSystemComponents, s conversion.Scope) error {
+ out.Allow = in.Allow
+ return nil
+}
+
+// Convert_v1beta1_WorkerSystemComponents_To_core_WorkerSystemComponents is an autogenerated conversion function.
+func Convert_v1beta1_WorkerSystemComponents_To_core_WorkerSystemComponents(in *WorkerSystemComponents, out *core.WorkerSystemComponents, s conversion.Scope) error {
+ return autoConvert_v1beta1_WorkerSystemComponents_To_core_WorkerSystemComponents(in, out, s)
+}
+
+func autoConvert_core_WorkerSystemComponents_To_v1beta1_WorkerSystemComponents(in *core.WorkerSystemComponents, out *WorkerSystemComponents, s conversion.Scope) error {
+ out.Allow = in.Allow
+ return nil
+}
+
+// Convert_core_WorkerSystemComponents_To_v1beta1_WorkerSystemComponents is an autogenerated conversion function.
+func Convert_core_WorkerSystemComponents_To_v1beta1_WorkerSystemComponents(in *core.WorkerSystemComponents, out *WorkerSystemComponents, s conversion.Scope) error {
+ return autoConvert_core_WorkerSystemComponents_To_v1beta1_WorkerSystemComponents(in, out, s)
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..78aab25
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,3949 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Addon) DeepCopyInto(out *Addon) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon.
+func (in *Addon) DeepCopy() *Addon {
+ if in == nil {
+ return nil
+ }
+ out := new(Addon)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Addons) DeepCopyInto(out *Addons) {
+ *out = *in
+ if in.KubernetesDashboard != nil {
+ in, out := &in.KubernetesDashboard, &out.KubernetesDashboard
+ *out = new(KubernetesDashboard)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NginxIngress != nil {
+ in, out := &in.NginxIngress, &out.NginxIngress
+ *out = new(NginxIngress)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addons.
+func (in *Addons) DeepCopy() *Addons {
+ if in == nil {
+ return nil
+ }
+ out := new(Addons)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionPlugin) DeepCopyInto(out *AdmissionPlugin) {
+ *out = *in
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPlugin.
+func (in *AdmissionPlugin) DeepCopy() *AdmissionPlugin {
+ if in == nil {
+ return nil
+ }
+ out := new(AdmissionPlugin)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Alerting) DeepCopyInto(out *Alerting) {
+ *out = *in
+ if in.EmailReceivers != nil {
+ in, out := &in.EmailReceivers, &out.EmailReceivers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alerting.
+func (in *Alerting) DeepCopy() *Alerting {
+ if in == nil {
+ return nil
+ }
+ out := new(Alerting)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditConfig) DeepCopyInto(out *AuditConfig) {
+ *out = *in
+ if in.AuditPolicy != nil {
+ in, out := &in.AuditPolicy, &out.AuditPolicy
+ *out = new(AuditPolicy)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig.
+func (in *AuditConfig) DeepCopy() *AuditConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AuditConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditPolicy) DeepCopyInto(out *AuditPolicy) {
+ *out = *in
+ if in.ConfigMapRef != nil {
+ in, out := &in.ConfigMapRef, &out.ConfigMapRef
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditPolicy.
+func (in *AuditPolicy) DeepCopy() *AuditPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(AuditPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AvailabilityZone) DeepCopyInto(out *AvailabilityZone) {
+ *out = *in
+ if in.UnavailableMachineTypes != nil {
+ in, out := &in.UnavailableMachineTypes, &out.UnavailableMachineTypes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UnavailableVolumeTypes != nil {
+ in, out := &in.UnavailableVolumeTypes, &out.UnavailableVolumeTypes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailabilityZone.
+func (in *AvailabilityZone) DeepCopy() *AvailabilityZone {
+ if in == nil {
+ return nil
+ }
+ out := new(AvailabilityZone)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucket) DeepCopyInto(out *BackupBucket) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucket.
+func (in *BackupBucket) DeepCopy() *BackupBucket {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucket)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupBucket) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketList) DeepCopyInto(out *BackupBucketList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]BackupBucket, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketList.
+func (in *BackupBucketList) DeepCopy() *BackupBucketList {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupBucketList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketProvider) DeepCopyInto(out *BackupBucketProvider) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketProvider.
+func (in *BackupBucketProvider) DeepCopy() *BackupBucketProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketSpec) DeepCopyInto(out *BackupBucketSpec) {
+ *out = *in
+ out.Provider = in.Provider
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ out.SecretRef = in.SecretRef
+ if in.SeedName != nil {
+ in, out := &in.SeedName, &out.SeedName
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketSpec.
+func (in *BackupBucketSpec) DeepCopy() *BackupBucketSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketStatus) DeepCopyInto(out *BackupBucketStatus) {
+ *out = *in
+ if in.ProviderStatus != nil {
+ in, out := &in.ProviderStatus, &out.ProviderStatus
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastOperation != nil {
+ in, out := &in.LastOperation, &out.LastOperation
+ *out = new(LastOperation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastError != nil {
+ in, out := &in.LastError, &out.LastError
+ *out = new(LastError)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GeneratedSecretRef != nil {
+ in, out := &in.GeneratedSecretRef, &out.GeneratedSecretRef
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketStatus.
+func (in *BackupBucketStatus) DeepCopy() *BackupBucketStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntry) DeepCopyInto(out *BackupEntry) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntry.
+func (in *BackupEntry) DeepCopy() *BackupEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupEntry) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntryList) DeepCopyInto(out *BackupEntryList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]BackupEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryList.
+func (in *BackupEntryList) DeepCopy() *BackupEntryList {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntryList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupEntryList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntrySpec) DeepCopyInto(out *BackupEntrySpec) {
+ *out = *in
+ if in.SeedName != nil {
+ in, out := &in.SeedName, &out.SeedName
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntrySpec.
+func (in *BackupEntrySpec) DeepCopy() *BackupEntrySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntrySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntryStatus) DeepCopyInto(out *BackupEntryStatus) {
+ *out = *in
+ if in.LastOperation != nil {
+ in, out := &in.LastOperation, &out.LastOperation
+ *out = new(LastOperation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastError != nil {
+ in, out := &in.LastError, &out.LastError
+ *out = new(LastError)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryStatus.
+func (in *BackupEntryStatus) DeepCopy() *BackupEntryStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntryStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CRI) DeepCopyInto(out *CRI) {
+ *out = *in
+ if in.ContainerRuntimes != nil {
+ in, out := &in.ContainerRuntimes, &out.ContainerRuntimes
+ *out = make([]ContainerRuntime, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRI.
+func (in *CRI) DeepCopy() *CRI {
+ if in == nil {
+ return nil
+ }
+ out := new(CRI)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudInfo) DeepCopyInto(out *CloudInfo) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInfo.
+func (in *CloudInfo) DeepCopy() *CloudInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudProfile) DeepCopyInto(out *CloudProfile) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfile.
+func (in *CloudProfile) DeepCopy() *CloudProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CloudProfile) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudProfileList) DeepCopyInto(out *CloudProfileList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CloudProfile, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfileList.
+func (in *CloudProfileList) DeepCopy() *CloudProfileList {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudProfileList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CloudProfileList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudProfileSpec) DeepCopyInto(out *CloudProfileSpec) {
+ *out = *in
+ if in.CABundle != nil {
+ in, out := &in.CABundle, &out.CABundle
+ *out = new(string)
+ **out = **in
+ }
+ in.Kubernetes.DeepCopyInto(&out.Kubernetes)
+ if in.MachineImages != nil {
+ in, out := &in.MachineImages, &out.MachineImages
+ *out = make([]MachineImage, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.MachineTypes != nil {
+ in, out := &in.MachineTypes, &out.MachineTypes
+ *out = make([]MachineType, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Regions != nil {
+ in, out := &in.Regions, &out.Regions
+ *out = make([]Region, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SeedSelector != nil {
+ in, out := &in.SeedSelector, &out.SeedSelector
+ *out = new(SeedSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VolumeTypes != nil {
+ in, out := &in.VolumeTypes, &out.VolumeTypes
+ *out = make([]VolumeType, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfileSpec.
+func (in *CloudProfileSpec) DeepCopy() *CloudProfileSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudProfileSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterAutoscaler) DeepCopyInto(out *ClusterAutoscaler) {
+ *out = *in
+ if in.ScaleDownDelayAfterAdd != nil {
+ in, out := &in.ScaleDownDelayAfterAdd, &out.ScaleDownDelayAfterAdd
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ScaleDownDelayAfterDelete != nil {
+ in, out := &in.ScaleDownDelayAfterDelete, &out.ScaleDownDelayAfterDelete
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ScaleDownDelayAfterFailure != nil {
+ in, out := &in.ScaleDownDelayAfterFailure, &out.ScaleDownDelayAfterFailure
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ScaleDownUnneededTime != nil {
+ in, out := &in.ScaleDownUnneededTime, &out.ScaleDownUnneededTime
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ScaleDownUtilizationThreshold != nil {
+ in, out := &in.ScaleDownUtilizationThreshold, &out.ScaleDownUtilizationThreshold
+ *out = new(float64)
+ **out = **in
+ }
+ if in.ScanInterval != nil {
+ in, out := &in.ScanInterval, &out.ScanInterval
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscaler.
+func (in *ClusterAutoscaler) DeepCopy() *ClusterAutoscaler {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterAutoscaler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterInfo) DeepCopyInto(out *ClusterInfo) {
+ *out = *in
+ out.Cloud = in.Cloud
+ out.Kubernetes = in.Kubernetes
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInfo.
+func (in *ClusterInfo) DeepCopy() *ClusterInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Condition) DeepCopyInto(out *Condition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ if in.Codes != nil {
+ in, out := &in.Codes, &out.Codes
+ *out = make([]ErrorCode, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
+func (in *Condition) DeepCopy() *Condition {
+ if in == nil {
+ return nil
+ }
+ out := new(Condition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerRuntime) DeepCopyInto(out *ContainerRuntime) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRuntime.
+func (in *ContainerRuntime) DeepCopy() *ContainerRuntime {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerRuntime)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerDeployment) DeepCopyInto(out *ControllerDeployment) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Policy != nil {
+ in, out := &in.Policy, &out.Policy
+ *out = new(ControllerDeploymentPolicy)
+ **out = **in
+ }
+ if in.SeedSelector != nil {
+ in, out := &in.SeedSelector, &out.SeedSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerDeployment.
+func (in *ControllerDeployment) DeepCopy() *ControllerDeployment {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerDeployment)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallation) DeepCopyInto(out *ControllerInstallation) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallation.
+func (in *ControllerInstallation) DeepCopy() *ControllerInstallation {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerInstallation) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallationList) DeepCopyInto(out *ControllerInstallationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ControllerInstallation, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationList.
+func (in *ControllerInstallationList) DeepCopy() *ControllerInstallationList {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerInstallationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallationSpec) DeepCopyInto(out *ControllerInstallationSpec) {
+ *out = *in
+ out.RegistrationRef = in.RegistrationRef
+ out.SeedRef = in.SeedRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationSpec.
+func (in *ControllerInstallationSpec) DeepCopy() *ControllerInstallationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallationStatus) DeepCopyInto(out *ControllerInstallationStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ProviderStatus != nil {
+ in, out := &in.ProviderStatus, &out.ProviderStatus
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationStatus.
+func (in *ControllerInstallationStatus) DeepCopy() *ControllerInstallationStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallationStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRegistration) DeepCopyInto(out *ControllerRegistration) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistration.
+func (in *ControllerRegistration) DeepCopy() *ControllerRegistration {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRegistration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRegistration) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRegistrationList) DeepCopyInto(out *ControllerRegistrationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ControllerRegistration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistrationList.
+func (in *ControllerRegistrationList) DeepCopy() *ControllerRegistrationList {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRegistrationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRegistrationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRegistrationSpec) DeepCopyInto(out *ControllerRegistrationSpec) {
+ *out = *in
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]ControllerResource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Deployment != nil {
+ in, out := &in.Deployment, &out.Deployment
+ *out = new(ControllerDeployment)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistrationSpec.
+func (in *ControllerRegistrationSpec) DeepCopy() *ControllerRegistrationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRegistrationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerResource) DeepCopyInto(out *ControllerResource) {
+ *out = *in
+ if in.GloballyEnabled != nil {
+ in, out := &in.GloballyEnabled, &out.GloballyEnabled
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ReconcileTimeout != nil {
+ in, out := &in.ReconcileTimeout, &out.ReconcileTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.Primary != nil {
+ in, out := &in.Primary, &out.Primary
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerResource.
+func (in *ControllerResource) DeepCopy() *ControllerResource {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNS) DeepCopyInto(out *DNS) {
+ *out = *in
+ if in.Domain != nil {
+ in, out := &in.Domain, &out.Domain
+ *out = new(string)
+ **out = **in
+ }
+ if in.Providers != nil {
+ in, out := &in.Providers, &out.Providers
+ *out = make([]DNSProvider, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS.
+func (in *DNS) DeepCopy() *DNS {
+ if in == nil {
+ return nil
+ }
+ out := new(DNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSIncludeExclude) DeepCopyInto(out *DNSIncludeExclude) {
+ *out = *in
+ if in.Include != nil {
+ in, out := &in.Include, &out.Include
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Exclude != nil {
+ in, out := &in.Exclude, &out.Exclude
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSIncludeExclude.
+func (in *DNSIncludeExclude) DeepCopy() *DNSIncludeExclude {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSIncludeExclude)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSProvider) DeepCopyInto(out *DNSProvider) {
+ *out = *in
+ if in.Domains != nil {
+ in, out := &in.Domains, &out.Domains
+ *out = new(DNSIncludeExclude)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Primary != nil {
+ in, out := &in.Primary, &out.Primary
+ *out = new(bool)
+ **out = **in
+ }
+ if in.SecretName != nil {
+ in, out := &in.SecretName, &out.SecretName
+ *out = new(string)
+ **out = **in
+ }
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = new(DNSIncludeExclude)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProvider.
+func (in *DNSProvider) DeepCopy() *DNSProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DataVolume) DeepCopyInto(out *DataVolume) {
+ *out = *in
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ if in.Encrypted != nil {
+ in, out := &in.Encrypted, &out.Encrypted
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolume.
+func (in *DataVolume) DeepCopy() *DataVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(DataVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Endpoint) DeepCopyInto(out *Endpoint) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
+func (in *Endpoint) DeepCopy() *Endpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(Endpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExpirableVersion) DeepCopyInto(out *ExpirableVersion) {
+ *out = *in
+ if in.ExpirationDate != nil {
+ in, out := &in.ExpirationDate, &out.ExpirationDate
+ *out = (*in).DeepCopy()
+ }
+ if in.Classification != nil {
+ in, out := &in.Classification, &out.Classification
+ *out = new(VersionClassification)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpirableVersion.
+func (in *ExpirableVersion) DeepCopy() *ExpirableVersion {
+ if in == nil {
+ return nil
+ }
+ out := new(ExpirableVersion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Extension) DeepCopyInto(out *Extension) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Disabled != nil {
+ in, out := &in.Disabled, &out.Disabled
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extension.
+func (in *Extension) DeepCopy() *Extension {
+ if in == nil {
+ return nil
+ }
+ out := new(Extension)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Gardener) DeepCopyInto(out *Gardener) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gardener.
+func (in *Gardener) DeepCopy() *Gardener {
+ if in == nil {
+ return nil
+ }
+ out := new(Gardener)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Hibernation) DeepCopyInto(out *Hibernation) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Schedules != nil {
+ in, out := &in.Schedules, &out.Schedules
+ *out = make([]HibernationSchedule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hibernation.
+func (in *Hibernation) DeepCopy() *Hibernation {
+ if in == nil {
+ return nil
+ }
+ out := new(Hibernation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HibernationSchedule) DeepCopyInto(out *HibernationSchedule) {
+ *out = *in
+ if in.Start != nil {
+ in, out := &in.Start, &out.Start
+ *out = new(string)
+ **out = **in
+ }
+ if in.End != nil {
+ in, out := &in.End, &out.End
+ *out = new(string)
+ **out = **in
+ }
+ if in.Location != nil {
+ in, out := &in.Location, &out.Location
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HibernationSchedule.
+func (in *HibernationSchedule) DeepCopy() *HibernationSchedule {
+ if in == nil {
+ return nil
+ }
+ out := new(HibernationSchedule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerConfig) DeepCopyInto(out *HorizontalPodAutoscalerConfig) {
+ *out = *in
+ if in.CPUInitializationPeriod != nil {
+ in, out := &in.CPUInitializationPeriod, &out.CPUInitializationPeriod
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.DownscaleDelay != nil {
+ in, out := &in.DownscaleDelay, &out.DownscaleDelay
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.DownscaleStabilization != nil {
+ in, out := &in.DownscaleStabilization, &out.DownscaleStabilization
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.InitialReadinessDelay != nil {
+ in, out := &in.InitialReadinessDelay, &out.InitialReadinessDelay
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.SyncPeriod != nil {
+ in, out := &in.SyncPeriod, &out.SyncPeriod
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.Tolerance != nil {
+ in, out := &in.Tolerance, &out.Tolerance
+ *out = new(float64)
+ **out = **in
+ }
+ if in.UpscaleDelay != nil {
+ in, out := &in.UpscaleDelay, &out.UpscaleDelay
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerConfig.
+func (in *HorizontalPodAutoscalerConfig) DeepCopy() *HorizontalPodAutoscalerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(HorizontalPodAutoscalerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Ingress) DeepCopyInto(out *Ingress) {
+ *out = *in
+ in.Controller.DeepCopyInto(&out.Controller)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
+func (in *Ingress) DeepCopy() *Ingress {
+ if in == nil {
+ return nil
+ }
+ out := new(Ingress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressController) DeepCopyInto(out *IngressController) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController.
+func (in *IngressController) DeepCopy() *IngressController {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressController)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.AdmissionPlugins != nil {
+ in, out := &in.AdmissionPlugins, &out.AdmissionPlugins
+ *out = make([]AdmissionPlugin, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.APIAudiences != nil {
+ in, out := &in.APIAudiences, &out.APIAudiences
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.AuditConfig != nil {
+ in, out := &in.AuditConfig, &out.AuditConfig
+ *out = new(AuditConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EnableBasicAuthentication != nil {
+ in, out := &in.EnableBasicAuthentication, &out.EnableBasicAuthentication
+ *out = new(bool)
+ **out = **in
+ }
+ if in.OIDCConfig != nil {
+ in, out := &in.OIDCConfig, &out.OIDCConfig
+ *out = new(OIDCConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RuntimeConfig != nil {
+ in, out := &in.RuntimeConfig, &out.RuntimeConfig
+ *out = make(map[string]bool, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ServiceAccountConfig != nil {
+ in, out := &in.ServiceAccountConfig, &out.ServiceAccountConfig
+ *out = new(ServiceAccountConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.WatchCacheSizes != nil {
+ in, out := &in.WatchCacheSizes, &out.WatchCacheSizes
+ *out = new(WatchCacheSizes)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Requests != nil {
+ in, out := &in.Requests, &out.Requests
+ *out = new(KubeAPIServerRequests)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig.
+func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerRequests) DeepCopyInto(out *KubeAPIServerRequests) {
+ *out = *in
+ if in.MaxNonMutatingInflight != nil {
+ in, out := &in.MaxNonMutatingInflight, &out.MaxNonMutatingInflight
+ *out = new(int32)
+ **out = **in
+ }
+ if in.MaxMutatingInflight != nil {
+ in, out := &in.MaxMutatingInflight, &out.MaxMutatingInflight
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerRequests.
+func (in *KubeAPIServerRequests) DeepCopy() *KubeAPIServerRequests {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServerRequests)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.HorizontalPodAutoscalerConfig != nil {
+ in, out := &in.HorizontalPodAutoscalerConfig, &out.HorizontalPodAutoscalerConfig
+ *out = new(HorizontalPodAutoscalerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NodeCIDRMaskSize != nil {
+ in, out := &in.NodeCIDRMaskSize, &out.NodeCIDRMaskSize
+ *out = new(int32)
+ **out = **in
+ }
+ if in.PodEvictionTimeout != nil {
+ in, out := &in.PodEvictionTimeout, &out.PodEvictionTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerConfig.
+func (in *KubeControllerManagerConfig) DeepCopy() *KubeControllerManagerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeControllerManagerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeProxyConfig) DeepCopyInto(out *KubeProxyConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.Mode != nil {
+ in, out := &in.Mode, &out.Mode
+ *out = new(ProxyMode)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfig.
+func (in *KubeProxyConfig) DeepCopy() *KubeProxyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeProxyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.KubeMaxPDVols != nil {
+ in, out := &in.KubeMaxPDVols, &out.KubeMaxPDVols
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfig.
+func (in *KubeSchedulerConfig) DeepCopy() *KubeSchedulerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeSchedulerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfig) DeepCopyInto(out *KubeletConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.CPUCFSQuota != nil {
+ in, out := &in.CPUCFSQuota, &out.CPUCFSQuota
+ *out = new(bool)
+ **out = **in
+ }
+ if in.CPUManagerPolicy != nil {
+ in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy
+ *out = new(string)
+ **out = **in
+ }
+ if in.EvictionHard != nil {
+ in, out := &in.EvictionHard, &out.EvictionHard
+ *out = new(KubeletConfigEviction)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EvictionMaxPodGracePeriod != nil {
+ in, out := &in.EvictionMaxPodGracePeriod, &out.EvictionMaxPodGracePeriod
+ *out = new(int32)
+ **out = **in
+ }
+ if in.EvictionMinimumReclaim != nil {
+ in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim
+ *out = new(KubeletConfigEvictionMinimumReclaim)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EvictionPressureTransitionPeriod != nil {
+ in, out := &in.EvictionPressureTransitionPeriod, &out.EvictionPressureTransitionPeriod
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.EvictionSoft != nil {
+ in, out := &in.EvictionSoft, &out.EvictionSoft
+ *out = new(KubeletConfigEviction)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EvictionSoftGracePeriod != nil {
+ in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod
+ *out = new(KubeletConfigEvictionSoftGracePeriod)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.MaxPods != nil {
+ in, out := &in.MaxPods, &out.MaxPods
+ *out = new(int32)
+ **out = **in
+ }
+ if in.PodPIDsLimit != nil {
+ in, out := &in.PodPIDsLimit, &out.PodPIDsLimit
+ *out = new(int64)
+ **out = **in
+ }
+ if in.ImagePullProgressDeadline != nil {
+ in, out := &in.ImagePullProgressDeadline, &out.ImagePullProgressDeadline
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.FailSwapOn != nil {
+ in, out := &in.FailSwapOn, &out.FailSwapOn
+ *out = new(bool)
+ **out = **in
+ }
+ if in.KubeReserved != nil {
+ in, out := &in.KubeReserved, &out.KubeReserved
+ *out = new(KubeletConfigReserved)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SystemReserved != nil {
+ in, out := &in.SystemReserved, &out.SystemReserved
+ *out = new(KubeletConfigReserved)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfig.
+func (in *KubeletConfig) DeepCopy() *KubeletConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfigEviction) DeepCopyInto(out *KubeletConfigEviction) {
+ *out = *in
+ if in.MemoryAvailable != nil {
+ in, out := &in.MemoryAvailable, &out.MemoryAvailable
+ *out = new(string)
+ **out = **in
+ }
+ if in.ImageFSAvailable != nil {
+ in, out := &in.ImageFSAvailable, &out.ImageFSAvailable
+ *out = new(string)
+ **out = **in
+ }
+ if in.ImageFSInodesFree != nil {
+ in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree
+ *out = new(string)
+ **out = **in
+ }
+ if in.NodeFSAvailable != nil {
+ in, out := &in.NodeFSAvailable, &out.NodeFSAvailable
+ *out = new(string)
+ **out = **in
+ }
+ if in.NodeFSInodesFree != nil {
+ in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEviction.
+func (in *KubeletConfigEviction) DeepCopy() *KubeletConfigEviction {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfigEviction)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfigEvictionMinimumReclaim) DeepCopyInto(out *KubeletConfigEvictionMinimumReclaim) {
+ *out = *in
+ if in.MemoryAvailable != nil {
+ in, out := &in.MemoryAvailable, &out.MemoryAvailable
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.ImageFSAvailable != nil {
+ in, out := &in.ImageFSAvailable, &out.ImageFSAvailable
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.ImageFSInodesFree != nil {
+ in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.NodeFSAvailable != nil {
+ in, out := &in.NodeFSAvailable, &out.NodeFSAvailable
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.NodeFSInodesFree != nil {
+ in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEvictionMinimumReclaim.
+func (in *KubeletConfigEvictionMinimumReclaim) DeepCopy() *KubeletConfigEvictionMinimumReclaim {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfigEvictionMinimumReclaim)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopyInto(out *KubeletConfigEvictionSoftGracePeriod) {
+ *out = *in
+ if in.MemoryAvailable != nil {
+ in, out := &in.MemoryAvailable, &out.MemoryAvailable
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ImageFSAvailable != nil {
+ in, out := &in.ImageFSAvailable, &out.ImageFSAvailable
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ImageFSInodesFree != nil {
+ in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.NodeFSAvailable != nil {
+ in, out := &in.NodeFSAvailable, &out.NodeFSAvailable
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.NodeFSInodesFree != nil {
+ in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEvictionSoftGracePeriod.
+func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopy() *KubeletConfigEvictionSoftGracePeriod {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfigEvictionSoftGracePeriod)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfigReserved) DeepCopyInto(out *KubeletConfigReserved) {
+ *out = *in
+ if in.CPU != nil {
+ in, out := &in.CPU, &out.CPU
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.Memory != nil {
+ in, out := &in.Memory, &out.Memory
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.EphemeralStorage != nil {
+ in, out := &in.EphemeralStorage, &out.EphemeralStorage
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.PID != nil {
+ in, out := &in.PID, &out.PID
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigReserved.
+func (in *KubeletConfigReserved) DeepCopy() *KubeletConfigReserved {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfigReserved)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Kubernetes) DeepCopyInto(out *Kubernetes) {
+ *out = *in
+ if in.AllowPrivilegedContainers != nil {
+ in, out := &in.AllowPrivilegedContainers, &out.AllowPrivilegedContainers
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ClusterAutoscaler != nil {
+ in, out := &in.ClusterAutoscaler, &out.ClusterAutoscaler
+ *out = new(ClusterAutoscaler)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeAPIServer != nil {
+ in, out := &in.KubeAPIServer, &out.KubeAPIServer
+ *out = new(KubeAPIServerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeControllerManager != nil {
+ in, out := &in.KubeControllerManager, &out.KubeControllerManager
+ *out = new(KubeControllerManagerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeScheduler != nil {
+ in, out := &in.KubeScheduler, &out.KubeScheduler
+ *out = new(KubeSchedulerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeProxy != nil {
+ in, out := &in.KubeProxy, &out.KubeProxy
+ *out = new(KubeProxyConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Kubelet != nil {
+ in, out := &in.Kubelet, &out.Kubelet
+ *out = new(KubeletConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VerticalPodAutoscaler != nil {
+ in, out := &in.VerticalPodAutoscaler, &out.VerticalPodAutoscaler
+ *out = new(VerticalPodAutoscaler)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kubernetes.
+func (in *Kubernetes) DeepCopy() *Kubernetes {
+ if in == nil {
+ return nil
+ }
+ out := new(Kubernetes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesConfig) DeepCopyInto(out *KubernetesConfig) {
+ *out = *in
+ if in.FeatureGates != nil {
+ in, out := &in.FeatureGates, &out.FeatureGates
+ *out = make(map[string]bool, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesConfig.
+func (in *KubernetesConfig) DeepCopy() *KubernetesConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesDashboard) DeepCopyInto(out *KubernetesDashboard) {
+ *out = *in
+ out.Addon = in.Addon
+ if in.AuthenticationMode != nil {
+ in, out := &in.AuthenticationMode, &out.AuthenticationMode
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesDashboard.
+func (in *KubernetesDashboard) DeepCopy() *KubernetesDashboard {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesDashboard)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesInfo) DeepCopyInto(out *KubernetesInfo) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesInfo.
+func (in *KubernetesInfo) DeepCopy() *KubernetesInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesSettings) DeepCopyInto(out *KubernetesSettings) {
+ *out = *in
+ if in.Versions != nil {
+ in, out := &in.Versions, &out.Versions
+ *out = make([]ExpirableVersion, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSettings.
+func (in *KubernetesSettings) DeepCopy() *KubernetesSettings {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesSettings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LastError) DeepCopyInto(out *LastError) {
+ *out = *in
+ if in.TaskID != nil {
+ in, out := &in.TaskID, &out.TaskID
+ *out = new(string)
+ **out = **in
+ }
+ if in.Codes != nil {
+ in, out := &in.Codes, &out.Codes
+ *out = make([]ErrorCode, len(*in))
+ copy(*out, *in)
+ }
+ if in.LastUpdateTime != nil {
+ in, out := &in.LastUpdateTime, &out.LastUpdateTime
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastError.
+func (in *LastError) DeepCopy() *LastError {
+ if in == nil {
+ return nil
+ }
+ out := new(LastError)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LastOperation) DeepCopyInto(out *LastOperation) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation.
+func (in *LastOperation) DeepCopy() *LastOperation {
+ if in == nil {
+ return nil
+ }
+ out := new(LastOperation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Machine) DeepCopyInto(out *Machine) {
+ *out = *in
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(ShootMachineImage)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine.
+func (in *Machine) DeepCopy() *Machine {
+ if in == nil {
+ return nil
+ }
+ out := new(Machine)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineControllerManagerSettings) DeepCopyInto(out *MachineControllerManagerSettings) {
+ *out = *in
+ if in.MachineDrainTimeout != nil {
+ in, out := &in.MachineDrainTimeout, &out.MachineDrainTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.MachineHealthTimeout != nil {
+ in, out := &in.MachineHealthTimeout, &out.MachineHealthTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.MachineCreationTimeout != nil {
+ in, out := &in.MachineCreationTimeout, &out.MachineCreationTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.MaxEvictRetries != nil {
+ in, out := &in.MaxEvictRetries, &out.MaxEvictRetries
+ *out = new(int32)
+ **out = **in
+ }
+ if in.NodeConditions != nil {
+ in, out := &in.NodeConditions, &out.NodeConditions
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineControllerManagerSettings.
+func (in *MachineControllerManagerSettings) DeepCopy() *MachineControllerManagerSettings {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineControllerManagerSettings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineImage) DeepCopyInto(out *MachineImage) {
+ *out = *in
+ if in.Versions != nil {
+ in, out := &in.Versions, &out.Versions
+ *out = make([]MachineImageVersion, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImage.
+func (in *MachineImage) DeepCopy() *MachineImage {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineImage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineImageVersion) DeepCopyInto(out *MachineImageVersion) {
+ *out = *in
+ in.ExpirableVersion.DeepCopyInto(&out.ExpirableVersion)
+ if in.CRI != nil {
+ in, out := &in.CRI, &out.CRI
+ *out = make([]CRI, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImageVersion.
+func (in *MachineImageVersion) DeepCopy() *MachineImageVersion {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineImageVersion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineType) DeepCopyInto(out *MachineType) {
+ *out = *in
+ out.CPU = in.CPU.DeepCopy()
+ out.GPU = in.GPU.DeepCopy()
+ out.Memory = in.Memory.DeepCopy()
+ if in.Storage != nil {
+ in, out := &in.Storage, &out.Storage
+ *out = new(MachineTypeStorage)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Usable != nil {
+ in, out := &in.Usable, &out.Usable
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineType.
+func (in *MachineType) DeepCopy() *MachineType {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineType)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineTypeStorage) DeepCopyInto(out *MachineTypeStorage) {
+ *out = *in
+ out.StorageSize = in.StorageSize.DeepCopy()
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTypeStorage.
+func (in *MachineTypeStorage) DeepCopy() *MachineTypeStorage {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineTypeStorage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Maintenance) DeepCopyInto(out *Maintenance) {
+ *out = *in
+ if in.AutoUpdate != nil {
+ in, out := &in.AutoUpdate, &out.AutoUpdate
+ *out = new(MaintenanceAutoUpdate)
+ **out = **in
+ }
+ if in.TimeWindow != nil {
+ in, out := &in.TimeWindow, &out.TimeWindow
+ *out = new(MaintenanceTimeWindow)
+ **out = **in
+ }
+ if in.ConfineSpecUpdateRollout != nil {
+ in, out := &in.ConfineSpecUpdateRollout, &out.ConfineSpecUpdateRollout
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintenance.
+func (in *Maintenance) DeepCopy() *Maintenance {
+ if in == nil {
+ return nil
+ }
+ out := new(Maintenance)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MaintenanceAutoUpdate) DeepCopyInto(out *MaintenanceAutoUpdate) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceAutoUpdate.
+func (in *MaintenanceAutoUpdate) DeepCopy() *MaintenanceAutoUpdate {
+ if in == nil {
+ return nil
+ }
+ out := new(MaintenanceAutoUpdate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MaintenanceTimeWindow) DeepCopyInto(out *MaintenanceTimeWindow) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceTimeWindow.
+func (in *MaintenanceTimeWindow) DeepCopy() *MaintenanceTimeWindow {
+ if in == nil {
+ return nil
+ }
+ out := new(MaintenanceTimeWindow)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Monitoring) DeepCopyInto(out *Monitoring) {
+ *out = *in
+ if in.Alerting != nil {
+ in, out := &in.Alerting, &out.Alerting
+ *out = new(Alerting)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Monitoring.
+func (in *Monitoring) DeepCopy() *Monitoring {
+ if in == nil {
+ return nil
+ }
+ out := new(Monitoring)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedResourceReference) DeepCopyInto(out *NamedResourceReference) {
+ *out = *in
+ out.ResourceRef = in.ResourceRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourceReference.
+func (in *NamedResourceReference) DeepCopy() *NamedResourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedResourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Networking) DeepCopyInto(out *Networking) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Pods != nil {
+ in, out := &in.Pods, &out.Pods
+ *out = new(string)
+ **out = **in
+ }
+ if in.Nodes != nil {
+ in, out := &in.Nodes, &out.Nodes
+ *out = new(string)
+ **out = **in
+ }
+ if in.Services != nil {
+ in, out := &in.Services, &out.Services
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking.
+func (in *Networking) DeepCopy() *Networking {
+ if in == nil {
+ return nil
+ }
+ out := new(Networking)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NginxIngress) DeepCopyInto(out *NginxIngress) {
+ *out = *in
+ out.Addon = in.Addon
+ if in.LoadBalancerSourceRanges != nil {
+ in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ExternalTrafficPolicy != nil {
+ in, out := &in.ExternalTrafficPolicy, &out.ExternalTrafficPolicy
+ *out = new(v1.ServiceExternalTrafficPolicyType)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxIngress.
+func (in *NginxIngress) DeepCopy() *NginxIngress {
+ if in == nil {
+ return nil
+ }
+ out := new(NginxIngress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OIDCConfig) DeepCopyInto(out *OIDCConfig) {
+ *out = *in
+ if in.CABundle != nil {
+ in, out := &in.CABundle, &out.CABundle
+ *out = new(string)
+ **out = **in
+ }
+ if in.ClientAuthentication != nil {
+ in, out := &in.ClientAuthentication, &out.ClientAuthentication
+ *out = new(OpenIDConnectClientAuthentication)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ClientID != nil {
+ in, out := &in.ClientID, &out.ClientID
+ *out = new(string)
+ **out = **in
+ }
+ if in.GroupsClaim != nil {
+ in, out := &in.GroupsClaim, &out.GroupsClaim
+ *out = new(string)
+ **out = **in
+ }
+ if in.GroupsPrefix != nil {
+ in, out := &in.GroupsPrefix, &out.GroupsPrefix
+ *out = new(string)
+ **out = **in
+ }
+ if in.IssuerURL != nil {
+ in, out := &in.IssuerURL, &out.IssuerURL
+ *out = new(string)
+ **out = **in
+ }
+ if in.RequiredClaims != nil {
+ in, out := &in.RequiredClaims, &out.RequiredClaims
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.SigningAlgs != nil {
+ in, out := &in.SigningAlgs, &out.SigningAlgs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UsernameClaim != nil {
+ in, out := &in.UsernameClaim, &out.UsernameClaim
+ *out = new(string)
+ **out = **in
+ }
+ if in.UsernamePrefix != nil {
+ in, out := &in.UsernamePrefix, &out.UsernamePrefix
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCConfig.
+func (in *OIDCConfig) DeepCopy() *OIDCConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OIDCConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDConnectClientAuthentication) DeepCopyInto(out *OpenIDConnectClientAuthentication) {
+ *out = *in
+ if in.ExtraConfig != nil {
+ in, out := &in.ExtraConfig, &out.ExtraConfig
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Secret != nil {
+ in, out := &in.Secret, &out.Secret
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectClientAuthentication.
+func (in *OpenIDConnectClientAuthentication) DeepCopy() *OpenIDConnectClientAuthentication {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDConnectClientAuthentication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Plant) DeepCopyInto(out *Plant) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plant.
+func (in *Plant) DeepCopy() *Plant {
+ if in == nil {
+ return nil
+ }
+ out := new(Plant)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Plant) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlantList) DeepCopyInto(out *PlantList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Plant, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantList.
+func (in *PlantList) DeepCopy() *PlantList {
+ if in == nil {
+ return nil
+ }
+ out := new(PlantList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PlantList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlantSpec) DeepCopyInto(out *PlantSpec) {
+ *out = *in
+ out.SecretRef = in.SecretRef
+ if in.Endpoints != nil {
+ in, out := &in.Endpoints, &out.Endpoints
+ *out = make([]Endpoint, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantSpec.
+func (in *PlantSpec) DeepCopy() *PlantSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PlantSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlantStatus) DeepCopyInto(out *PlantStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ObservedGeneration != nil {
+ in, out := &in.ObservedGeneration, &out.ObservedGeneration
+ *out = new(int64)
+ **out = **in
+ }
+ if in.ClusterInfo != nil {
+ in, out := &in.ClusterInfo, &out.ClusterInfo
+ *out = new(ClusterInfo)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantStatus.
+func (in *PlantStatus) DeepCopy() *PlantStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PlantStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Project) DeepCopyInto(out *Project) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project.
+func (in *Project) DeepCopy() *Project {
+ if in == nil {
+ return nil
+ }
+ out := new(Project)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Project) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectList) DeepCopyInto(out *ProjectList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Project, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList.
+func (in *ProjectList) DeepCopy() *ProjectList {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProjectList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectMember) DeepCopyInto(out *ProjectMember) {
+ *out = *in
+ out.Subject = in.Subject
+ if in.Roles != nil {
+ in, out := &in.Roles, &out.Roles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectMember.
+func (in *ProjectMember) DeepCopy() *ProjectMember {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectMember)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
+ *out = *in
+ if in.CreatedBy != nil {
+ in, out := &in.CreatedBy, &out.CreatedBy
+ *out = new(rbacv1.Subject)
+ **out = **in
+ }
+ if in.Description != nil {
+ in, out := &in.Description, &out.Description
+ *out = new(string)
+ **out = **in
+ }
+ if in.Owner != nil {
+ in, out := &in.Owner, &out.Owner
+ *out = new(rbacv1.Subject)
+ **out = **in
+ }
+ if in.Purpose != nil {
+ in, out := &in.Purpose, &out.Purpose
+ *out = new(string)
+ **out = **in
+ }
+ if in.Members != nil {
+ in, out := &in.Members, &out.Members
+ *out = make([]ProjectMember, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Namespace != nil {
+ in, out := &in.Namespace, &out.Namespace
+ *out = new(string)
+ **out = **in
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = new(ProjectTolerations)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec.
+func (in *ProjectSpec) DeepCopy() *ProjectSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) {
+ *out = *in
+ if in.StaleSinceTimestamp != nil {
+ in, out := &in.StaleSinceTimestamp, &out.StaleSinceTimestamp
+ *out = (*in).DeepCopy()
+ }
+ if in.StaleAutoDeleteTimestamp != nil {
+ in, out := &in.StaleAutoDeleteTimestamp, &out.StaleAutoDeleteTimestamp
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus.
+func (in *ProjectStatus) DeepCopy() *ProjectStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectTolerations) DeepCopyInto(out *ProjectTolerations) {
+ *out = *in
+ if in.Defaults != nil {
+ in, out := &in.Defaults, &out.Defaults
+ *out = make([]Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Whitelist != nil {
+ in, out := &in.Whitelist, &out.Whitelist
+ *out = make([]Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectTolerations.
+func (in *ProjectTolerations) DeepCopy() *ProjectTolerations {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectTolerations)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Provider) DeepCopyInto(out *Provider) {
+ *out = *in
+ if in.ControlPlaneConfig != nil {
+ in, out := &in.ControlPlaneConfig, &out.ControlPlaneConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.InfrastructureConfig != nil {
+ in, out := &in.InfrastructureConfig, &out.InfrastructureConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Workers != nil {
+ in, out := &in.Workers, &out.Workers
+ *out = make([]Worker, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provider.
+func (in *Provider) DeepCopy() *Provider {
+ if in == nil {
+ return nil
+ }
+ out := new(Provider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Quota) DeepCopyInto(out *Quota) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Quota.
+func (in *Quota) DeepCopy() *Quota {
+ if in == nil {
+ return nil
+ }
+ out := new(Quota)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Quota) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuotaList) DeepCopyInto(out *QuotaList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Quota, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaList.
+func (in *QuotaList) DeepCopy() *QuotaList {
+ if in == nil {
+ return nil
+ }
+ out := new(QuotaList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *QuotaList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuotaSpec) DeepCopyInto(out *QuotaSpec) {
+ *out = *in
+ if in.ClusterLifetimeDays != nil {
+ in, out := &in.ClusterLifetimeDays, &out.ClusterLifetimeDays
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Metrics != nil {
+ in, out := &in.Metrics, &out.Metrics
+ *out = make(v1.ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+ out.Scope = in.Scope
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaSpec.
+func (in *QuotaSpec) DeepCopy() *QuotaSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(QuotaSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Region) DeepCopyInto(out *Region) {
+ *out = *in
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = make([]AvailabilityZone, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Region.
+func (in *Region) DeepCopy() *Region {
+ if in == nil {
+ return nil
+ }
+ out := new(Region)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceWatchCacheSize) DeepCopyInto(out *ResourceWatchCacheSize) {
+ *out = *in
+ if in.APIGroup != nil {
+ in, out := &in.APIGroup, &out.APIGroup
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceWatchCacheSize.
+func (in *ResourceWatchCacheSize) DeepCopy() *ResourceWatchCacheSize {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceWatchCacheSize)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretBinding) DeepCopyInto(out *SecretBinding) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.SecretRef = in.SecretRef
+ if in.Quotas != nil {
+ in, out := &in.Quotas, &out.Quotas
+ *out = make([]v1.ObjectReference, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBinding.
+func (in *SecretBinding) DeepCopy() *SecretBinding {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretBinding)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecretBinding) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretBindingList) DeepCopyInto(out *SecretBindingList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]SecretBinding, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBindingList.
+func (in *SecretBindingList) DeepCopy() *SecretBindingList {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretBindingList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecretBindingList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Seed) DeepCopyInto(out *Seed) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Seed.
+func (in *Seed) DeepCopy() *Seed {
+ if in == nil {
+ return nil
+ }
+ out := new(Seed)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Seed) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedBackup) DeepCopyInto(out *SeedBackup) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Region != nil {
+ in, out := &in.Region, &out.Region
+ *out = new(string)
+ **out = **in
+ }
+ out.SecretRef = in.SecretRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedBackup.
+func (in *SeedBackup) DeepCopy() *SeedBackup {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedBackup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedDNS) DeepCopyInto(out *SeedDNS) {
+ *out = *in
+ if in.IngressDomain != nil {
+ in, out := &in.IngressDomain, &out.IngressDomain
+ *out = new(string)
+ **out = **in
+ }
+ if in.Provider != nil {
+ in, out := &in.Provider, &out.Provider
+ *out = new(SeedDNSProvider)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNS.
+func (in *SeedDNS) DeepCopy() *SeedDNS {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedDNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedDNSProvider) DeepCopyInto(out *SeedDNSProvider) {
+ *out = *in
+ out.SecretRef = in.SecretRef
+ if in.Domains != nil {
+ in, out := &in.Domains, &out.Domains
+ *out = new(DNSIncludeExclude)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = new(DNSIncludeExclude)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNSProvider.
+func (in *SeedDNSProvider) DeepCopy() *SeedDNSProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedDNSProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedList) DeepCopyInto(out *SeedList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Seed, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedList.
+func (in *SeedList) DeepCopy() *SeedList {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SeedList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedNetworks) DeepCopyInto(out *SeedNetworks) {
+ *out = *in
+ if in.Nodes != nil {
+ in, out := &in.Nodes, &out.Nodes
+ *out = new(string)
+ **out = **in
+ }
+ if in.ShootDefaults != nil {
+ in, out := &in.ShootDefaults, &out.ShootDefaults
+ *out = new(ShootNetworks)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.BlockCIDRs != nil {
+ in, out := &in.BlockCIDRs, &out.BlockCIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedNetworks.
+func (in *SeedNetworks) DeepCopy() *SeedNetworks {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedNetworks)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedProvider) DeepCopyInto(out *SeedProvider) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedProvider.
+func (in *SeedProvider) DeepCopy() *SeedProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSelector) DeepCopyInto(out *SeedSelector) {
+ *out = *in
+ if in.LabelSelector != nil {
+ in, out := &in.LabelSelector, &out.LabelSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ProviderTypes != nil {
+ in, out := &in.ProviderTypes, &out.ProviderTypes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSelector.
+func (in *SeedSelector) DeepCopy() *SeedSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingExcessCapacityReservation) DeepCopyInto(out *SeedSettingExcessCapacityReservation) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingExcessCapacityReservation.
+func (in *SeedSettingExcessCapacityReservation) DeepCopy() *SeedSettingExcessCapacityReservation {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingExcessCapacityReservation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingLoadBalancerServices) DeepCopyInto(out *SeedSettingLoadBalancerServices) {
+ *out = *in
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingLoadBalancerServices.
+func (in *SeedSettingLoadBalancerServices) DeepCopy() *SeedSettingLoadBalancerServices {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingLoadBalancerServices)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingScheduling) DeepCopyInto(out *SeedSettingScheduling) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingScheduling.
+func (in *SeedSettingScheduling) DeepCopy() *SeedSettingScheduling {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingScheduling)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingShootDNS) DeepCopyInto(out *SeedSettingShootDNS) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingShootDNS.
+func (in *SeedSettingShootDNS) DeepCopy() *SeedSettingShootDNS {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingShootDNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingVerticalPodAutoscaler) DeepCopyInto(out *SeedSettingVerticalPodAutoscaler) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingVerticalPodAutoscaler.
+func (in *SeedSettingVerticalPodAutoscaler) DeepCopy() *SeedSettingVerticalPodAutoscaler {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingVerticalPodAutoscaler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettings) DeepCopyInto(out *SeedSettings) {
+ *out = *in
+ if in.ExcessCapacityReservation != nil {
+ in, out := &in.ExcessCapacityReservation, &out.ExcessCapacityReservation
+ *out = new(SeedSettingExcessCapacityReservation)
+ **out = **in
+ }
+ if in.Scheduling != nil {
+ in, out := &in.Scheduling, &out.Scheduling
+ *out = new(SeedSettingScheduling)
+ **out = **in
+ }
+ if in.ShootDNS != nil {
+ in, out := &in.ShootDNS, &out.ShootDNS
+ *out = new(SeedSettingShootDNS)
+ **out = **in
+ }
+ if in.LoadBalancerServices != nil {
+ in, out := &in.LoadBalancerServices, &out.LoadBalancerServices
+ *out = new(SeedSettingLoadBalancerServices)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VerticalPodAutoscaler != nil {
+ in, out := &in.VerticalPodAutoscaler, &out.VerticalPodAutoscaler
+ *out = new(SeedSettingVerticalPodAutoscaler)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettings.
+func (in *SeedSettings) DeepCopy() *SeedSettings {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSpec) DeepCopyInto(out *SeedSpec) {
+ *out = *in
+ if in.Backup != nil {
+ in, out := &in.Backup, &out.Backup
+ *out = new(SeedBackup)
+ (*in).DeepCopyInto(*out)
+ }
+ in.DNS.DeepCopyInto(&out.DNS)
+ in.Networks.DeepCopyInto(&out.Networks)
+ in.Provider.DeepCopyInto(&out.Provider)
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ if in.Taints != nil {
+ in, out := &in.Taints, &out.Taints
+ *out = make([]SeedTaint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Volume != nil {
+ in, out := &in.Volume, &out.Volume
+ *out = new(SeedVolume)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Settings != nil {
+ in, out := &in.Settings, &out.Settings
+ *out = new(SeedSettings)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = new(Ingress)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSpec.
+func (in *SeedSpec) DeepCopy() *SeedSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedStatus) DeepCopyInto(out *SeedStatus) {
+ *out = *in
+ if in.Gardener != nil {
+ in, out := &in.Gardener, &out.Gardener
+ *out = new(Gardener)
+ **out = **in
+ }
+ if in.KubernetesVersion != nil {
+ in, out := &in.KubernetesVersion, &out.KubernetesVersion
+ *out = new(string)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ClusterIdentity != nil {
+ in, out := &in.ClusterIdentity, &out.ClusterIdentity
+ *out = new(string)
+ **out = **in
+ }
+ if in.Capacity != nil {
+ in, out := &in.Capacity, &out.Capacity
+ *out = make(v1.ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+ if in.Allocatable != nil {
+ in, out := &in.Allocatable, &out.Allocatable
+ *out = make(v1.ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedStatus.
+func (in *SeedStatus) DeepCopy() *SeedStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedTaint) DeepCopyInto(out *SeedTaint) {
+ *out = *in
+ if in.Value != nil {
+ in, out := &in.Value, &out.Value
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedTaint.
+func (in *SeedTaint) DeepCopy() *SeedTaint {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedTaint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedVolume) DeepCopyInto(out *SeedVolume) {
+ *out = *in
+ if in.MinimumSize != nil {
+ in, out := &in.MinimumSize, &out.MinimumSize
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.Providers != nil {
+ in, out := &in.Providers, &out.Providers
+ *out = make([]SeedVolumeProvider, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedVolume.
+func (in *SeedVolume) DeepCopy() *SeedVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedVolumeProvider) DeepCopyInto(out *SeedVolumeProvider) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedVolumeProvider.
+func (in *SeedVolumeProvider) DeepCopy() *SeedVolumeProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedVolumeProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountConfig) DeepCopyInto(out *ServiceAccountConfig) {
+ *out = *in
+ if in.Issuer != nil {
+ in, out := &in.Issuer, &out.Issuer
+ *out = new(string)
+ **out = **in
+ }
+ if in.SigningKeySecret != nil {
+ in, out := &in.SigningKeySecret, &out.SigningKeySecret
+ *out = new(v1.LocalObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountConfig.
+func (in *ServiceAccountConfig) DeepCopy() *ServiceAccountConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceAccountConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Shoot) DeepCopyInto(out *Shoot) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Shoot.
+func (in *Shoot) DeepCopy() *Shoot {
+ if in == nil {
+ return nil
+ }
+ out := new(Shoot)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Shoot) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootList) DeepCopyInto(out *ShootList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Shoot, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootList.
+func (in *ShootList) DeepCopy() *ShootList {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ShootList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootMachineImage) DeepCopyInto(out *ShootMachineImage) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Version != nil {
+ in, out := &in.Version, &out.Version
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootMachineImage.
+func (in *ShootMachineImage) DeepCopy() *ShootMachineImage {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootMachineImage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootNetworks) DeepCopyInto(out *ShootNetworks) {
+ *out = *in
+ if in.Pods != nil {
+ in, out := &in.Pods, &out.Pods
+ *out = new(string)
+ **out = **in
+ }
+ if in.Services != nil {
+ in, out := &in.Services, &out.Services
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootNetworks.
+func (in *ShootNetworks) DeepCopy() *ShootNetworks {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootNetworks)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootSpec) DeepCopyInto(out *ShootSpec) {
+ *out = *in
+ if in.Addons != nil {
+ in, out := &in.Addons, &out.Addons
+ *out = new(Addons)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DNS != nil {
+ in, out := &in.DNS, &out.DNS
+ *out = new(DNS)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Extensions != nil {
+ in, out := &in.Extensions, &out.Extensions
+ *out = make([]Extension, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Hibernation != nil {
+ in, out := &in.Hibernation, &out.Hibernation
+ *out = new(Hibernation)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Kubernetes.DeepCopyInto(&out.Kubernetes)
+ in.Networking.DeepCopyInto(&out.Networking)
+ if in.Maintenance != nil {
+ in, out := &in.Maintenance, &out.Maintenance
+ *out = new(Maintenance)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Monitoring != nil {
+ in, out := &in.Monitoring, &out.Monitoring
+ *out = new(Monitoring)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Provider.DeepCopyInto(&out.Provider)
+ if in.Purpose != nil {
+ in, out := &in.Purpose, &out.Purpose
+ *out = new(ShootPurpose)
+ **out = **in
+ }
+ if in.SeedName != nil {
+ in, out := &in.SeedName, &out.SeedName
+ *out = new(string)
+ **out = **in
+ }
+ if in.SeedSelector != nil {
+ in, out := &in.SeedSelector, &out.SeedSelector
+ *out = new(SeedSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]NamedResourceReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootSpec.
+func (in *ShootSpec) DeepCopy() *ShootSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootStatus) DeepCopyInto(out *ShootStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Constraints != nil {
+ in, out := &in.Constraints, &out.Constraints
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ out.Gardener = in.Gardener
+ if in.LastOperation != nil {
+ in, out := &in.LastOperation, &out.LastOperation
+ *out = new(LastOperation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastErrors != nil {
+ in, out := &in.LastErrors, &out.LastErrors
+ *out = make([]LastError, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.RetryCycleStartTime != nil {
+ in, out := &in.RetryCycleStartTime, &out.RetryCycleStartTime
+ *out = (*in).DeepCopy()
+ }
+ if in.SeedName != nil {
+ in, out := &in.SeedName, &out.SeedName
+ *out = new(string)
+ **out = **in
+ }
+ if in.ClusterIdentity != nil {
+ in, out := &in.ClusterIdentity, &out.ClusterIdentity
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStatus.
+func (in *ShootStatus) DeepCopy() *ShootStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Toleration) DeepCopyInto(out *Toleration) {
+ *out = *in
+ if in.Value != nil {
+ in, out := &in.Value, &out.Value
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration.
+func (in *Toleration) DeepCopy() *Toleration {
+ if in == nil {
+ return nil
+ }
+ out := new(Toleration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VerticalPodAutoscaler) DeepCopyInto(out *VerticalPodAutoscaler) {
+ *out = *in
+ if in.EvictAfterOOMThreshold != nil {
+ in, out := &in.EvictAfterOOMThreshold, &out.EvictAfterOOMThreshold
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.EvictionRateBurst != nil {
+ in, out := &in.EvictionRateBurst, &out.EvictionRateBurst
+ *out = new(int32)
+ **out = **in
+ }
+ if in.EvictionRateLimit != nil {
+ in, out := &in.EvictionRateLimit, &out.EvictionRateLimit
+ *out = new(float64)
+ **out = **in
+ }
+ if in.EvictionTolerance != nil {
+ in, out := &in.EvictionTolerance, &out.EvictionTolerance
+ *out = new(float64)
+ **out = **in
+ }
+ if in.RecommendationMarginFraction != nil {
+ in, out := &in.RecommendationMarginFraction, &out.RecommendationMarginFraction
+ *out = new(float64)
+ **out = **in
+ }
+ if in.UpdaterInterval != nil {
+ in, out := &in.UpdaterInterval, &out.UpdaterInterval
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.RecommenderInterval != nil {
+ in, out := &in.RecommenderInterval, &out.RecommenderInterval
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscaler.
+func (in *VerticalPodAutoscaler) DeepCopy() *VerticalPodAutoscaler {
+ if in == nil {
+ return nil
+ }
+ out := new(VerticalPodAutoscaler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Volume) DeepCopyInto(out *Volume) {
+ *out = *in
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ if in.Encrypted != nil {
+ in, out := &in.Encrypted, &out.Encrypted
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume.
+func (in *Volume) DeepCopy() *Volume {
+ if in == nil {
+ return nil
+ }
+ out := new(Volume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeType) DeepCopyInto(out *VolumeType) {
+ *out = *in
+ if in.Usable != nil {
+ in, out := &in.Usable, &out.Usable
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeType.
+func (in *VolumeType) DeepCopy() *VolumeType {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeType)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WatchCacheSizes) DeepCopyInto(out *WatchCacheSizes) {
+ *out = *in
+ if in.Default != nil {
+ in, out := &in.Default, &out.Default
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]ResourceWatchCacheSize, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchCacheSizes.
+func (in *WatchCacheSizes) DeepCopy() *WatchCacheSizes {
+ if in == nil {
+ return nil
+ }
+ out := new(WatchCacheSizes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Worker) DeepCopyInto(out *Worker) {
+ *out = *in
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.CABundle != nil {
+ in, out := &in.CABundle, &out.CABundle
+ *out = new(string)
+ **out = **in
+ }
+ if in.CRI != nil {
+ in, out := &in.CRI, &out.CRI
+ *out = new(CRI)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Kubernetes != nil {
+ in, out := &in.Kubernetes, &out.Kubernetes
+ *out = new(WorkerKubernetes)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ in.Machine.DeepCopyInto(&out.Machine)
+ if in.MaxSurge != nil {
+ in, out := &in.MaxSurge, &out.MaxSurge
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Taints != nil {
+ in, out := &in.Taints, &out.Taints
+ *out = make([]v1.Taint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Volume != nil {
+ in, out := &in.Volume, &out.Volume
+ *out = new(Volume)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DataVolumes != nil {
+ in, out := &in.DataVolumes, &out.DataVolumes
+ *out = make([]DataVolume, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.KubeletDataVolumeName != nil {
+ in, out := &in.KubeletDataVolumeName, &out.KubeletDataVolumeName
+ *out = new(string)
+ **out = **in
+ }
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SystemComponents != nil {
+ in, out := &in.SystemComponents, &out.SystemComponents
+ *out = new(WorkerSystemComponents)
+ **out = **in
+ }
+ if in.MachineControllerManagerSettings != nil {
+ in, out := &in.MachineControllerManagerSettings, &out.MachineControllerManagerSettings
+ *out = new(MachineControllerManagerSettings)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Worker.
+func (in *Worker) DeepCopy() *Worker {
+ if in == nil {
+ return nil
+ }
+ out := new(Worker)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkerKubernetes) DeepCopyInto(out *WorkerKubernetes) {
+ *out = *in
+ if in.Kubelet != nil {
+ in, out := &in.Kubelet, &out.Kubelet
+ *out = new(KubeletConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerKubernetes.
+func (in *WorkerKubernetes) DeepCopy() *WorkerKubernetes {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkerKubernetes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkerSystemComponents) DeepCopyInto(out *WorkerSystemComponents) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSystemComponents.
+func (in *WorkerSystemComponents) DeepCopy() *WorkerSystemComponents {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkerSystemComponents)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go
new file mode 100644
index 0000000..1f87ca3
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go
@@ -0,0 +1,138 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ scheme.AddTypeDefaultingFunc(&CloudProfile{}, func(obj interface{}) { SetObjectDefaults_CloudProfile(obj.(*CloudProfile)) })
+ scheme.AddTypeDefaultingFunc(&CloudProfileList{}, func(obj interface{}) { SetObjectDefaults_CloudProfileList(obj.(*CloudProfileList)) })
+ scheme.AddTypeDefaultingFunc(&ControllerRegistration{}, func(obj interface{}) { SetObjectDefaults_ControllerRegistration(obj.(*ControllerRegistration)) })
+ scheme.AddTypeDefaultingFunc(&ControllerRegistrationList{}, func(obj interface{}) { SetObjectDefaults_ControllerRegistrationList(obj.(*ControllerRegistrationList)) })
+ scheme.AddTypeDefaultingFunc(&Project{}, func(obj interface{}) { SetObjectDefaults_Project(obj.(*Project)) })
+ scheme.AddTypeDefaultingFunc(&ProjectList{}, func(obj interface{}) { SetObjectDefaults_ProjectList(obj.(*ProjectList)) })
+ scheme.AddTypeDefaultingFunc(&SecretBinding{}, func(obj interface{}) { SetObjectDefaults_SecretBinding(obj.(*SecretBinding)) })
+ scheme.AddTypeDefaultingFunc(&SecretBindingList{}, func(obj interface{}) { SetObjectDefaults_SecretBindingList(obj.(*SecretBindingList)) })
+ scheme.AddTypeDefaultingFunc(&Seed{}, func(obj interface{}) { SetObjectDefaults_Seed(obj.(*Seed)) })
+ scheme.AddTypeDefaultingFunc(&SeedList{}, func(obj interface{}) { SetObjectDefaults_SeedList(obj.(*SeedList)) })
+ scheme.AddTypeDefaultingFunc(&Shoot{}, func(obj interface{}) { SetObjectDefaults_Shoot(obj.(*Shoot)) })
+ scheme.AddTypeDefaultingFunc(&ShootList{}, func(obj interface{}) { SetObjectDefaults_ShootList(obj.(*ShootList)) })
+ return nil
+}
+
+func SetObjectDefaults_CloudProfile(in *CloudProfile) {
+ for i := range in.Spec.MachineTypes {
+ a := &in.Spec.MachineTypes[i]
+ SetDefaults_MachineType(a)
+ }
+ for i := range in.Spec.VolumeTypes {
+ a := &in.Spec.VolumeTypes[i]
+ SetDefaults_VolumeType(a)
+ }
+}
+
+func SetObjectDefaults_CloudProfileList(in *CloudProfileList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_CloudProfile(a)
+ }
+}
+
+func SetObjectDefaults_ControllerRegistration(in *ControllerRegistration) {
+ for i := range in.Spec.Resources {
+ a := &in.Spec.Resources[i]
+ SetDefaults_ControllerResource(a)
+ }
+ if in.Spec.Deployment != nil {
+ SetDefaults_ControllerDeployment(in.Spec.Deployment)
+ }
+}
+
+func SetObjectDefaults_ControllerRegistrationList(in *ControllerRegistrationList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_ControllerRegistration(a)
+ }
+}
+
+func SetObjectDefaults_Project(in *Project) {
+ SetDefaults_Project(in)
+}
+
+func SetObjectDefaults_ProjectList(in *ProjectList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_Project(a)
+ }
+}
+
+func SetObjectDefaults_SecretBinding(in *SecretBinding) {
+ SetDefaults_SecretBinding(in)
+}
+
+func SetObjectDefaults_SecretBindingList(in *SecretBindingList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_SecretBinding(a)
+ }
+}
+
+func SetObjectDefaults_Seed(in *Seed) {
+ SetDefaults_Seed(in)
+}
+
+func SetObjectDefaults_SeedList(in *SeedList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_Seed(a)
+ }
+}
+
+func SetObjectDefaults_Shoot(in *Shoot) {
+ SetDefaults_Shoot(in)
+ if in.Spec.Addons != nil {
+ if in.Spec.Addons.NginxIngress != nil {
+ SetDefaults_NginxIngress(in.Spec.Addons.NginxIngress)
+ }
+ }
+ if in.Spec.Kubernetes.VerticalPodAutoscaler != nil {
+ SetDefaults_VerticalPodAutoscaler(in.Spec.Kubernetes.VerticalPodAutoscaler)
+ }
+ if in.Spec.Maintenance != nil {
+ SetDefaults_Maintenance(in.Spec.Maintenance)
+ }
+ for i := range in.Spec.Provider.Workers {
+ a := &in.Spec.Provider.Workers[i]
+ SetDefaults_Worker(a)
+ }
+}
+
+func SetObjectDefaults_ShootList(in *ShootList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_Shoot(a)
+ }
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go
new file mode 100644
index 0000000..8af2eec
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go
@@ -0,0 +1,4112 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package core
+
+import (
+ v1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Addon) DeepCopyInto(out *Addon) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon.
+func (in *Addon) DeepCopy() *Addon {
+ if in == nil {
+ return nil
+ }
+ out := new(Addon)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Addons) DeepCopyInto(out *Addons) {
+ *out = *in
+ if in.KubernetesDashboard != nil {
+ in, out := &in.KubernetesDashboard, &out.KubernetesDashboard
+ *out = new(KubernetesDashboard)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NginxIngress != nil {
+ in, out := &in.NginxIngress, &out.NginxIngress
+ *out = new(NginxIngress)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addons.
+func (in *Addons) DeepCopy() *Addons {
+ if in == nil {
+ return nil
+ }
+ out := new(Addons)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionPlugin) DeepCopyInto(out *AdmissionPlugin) {
+ *out = *in
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPlugin.
+func (in *AdmissionPlugin) DeepCopy() *AdmissionPlugin {
+ if in == nil {
+ return nil
+ }
+ out := new(AdmissionPlugin)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Alerting) DeepCopyInto(out *Alerting) {
+ *out = *in
+ if in.EmailReceivers != nil {
+ in, out := &in.EmailReceivers, &out.EmailReceivers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alerting.
+func (in *Alerting) DeepCopy() *Alerting {
+ if in == nil {
+ return nil
+ }
+ out := new(Alerting)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditConfig) DeepCopyInto(out *AuditConfig) {
+ *out = *in
+ if in.AuditPolicy != nil {
+ in, out := &in.AuditPolicy, &out.AuditPolicy
+ *out = new(AuditPolicy)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig.
+func (in *AuditConfig) DeepCopy() *AuditConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AuditConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditPolicy) DeepCopyInto(out *AuditPolicy) {
+ *out = *in
+ if in.ConfigMapRef != nil {
+ in, out := &in.ConfigMapRef, &out.ConfigMapRef
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditPolicy.
+func (in *AuditPolicy) DeepCopy() *AuditPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(AuditPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AvailabilityZone) DeepCopyInto(out *AvailabilityZone) {
+ *out = *in
+ if in.UnavailableMachineTypes != nil {
+ in, out := &in.UnavailableMachineTypes, &out.UnavailableMachineTypes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UnavailableVolumeTypes != nil {
+ in, out := &in.UnavailableVolumeTypes, &out.UnavailableVolumeTypes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailabilityZone.
+func (in *AvailabilityZone) DeepCopy() *AvailabilityZone {
+ if in == nil {
+ return nil
+ }
+ out := new(AvailabilityZone)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucket) DeepCopyInto(out *BackupBucket) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucket.
+func (in *BackupBucket) DeepCopy() *BackupBucket {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucket)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupBucket) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketList) DeepCopyInto(out *BackupBucketList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]BackupBucket, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketList.
+func (in *BackupBucketList) DeepCopy() *BackupBucketList {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupBucketList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketProvider) DeepCopyInto(out *BackupBucketProvider) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketProvider.
+func (in *BackupBucketProvider) DeepCopy() *BackupBucketProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketSpec) DeepCopyInto(out *BackupBucketSpec) {
+ *out = *in
+ out.Provider = in.Provider
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ out.SecretRef = in.SecretRef
+ if in.SeedName != nil {
+ in, out := &in.SeedName, &out.SeedName
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketSpec.
+func (in *BackupBucketSpec) DeepCopy() *BackupBucketSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketStatus) DeepCopyInto(out *BackupBucketStatus) {
+ *out = *in
+ if in.ProviderStatus != nil {
+ in, out := &in.ProviderStatus, &out.ProviderStatus
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastOperation != nil {
+ in, out := &in.LastOperation, &out.LastOperation
+ *out = new(LastOperation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastError != nil {
+ in, out := &in.LastError, &out.LastError
+ *out = new(LastError)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GeneratedSecretRef != nil {
+ in, out := &in.GeneratedSecretRef, &out.GeneratedSecretRef
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketStatus.
+func (in *BackupBucketStatus) DeepCopy() *BackupBucketStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntry) DeepCopyInto(out *BackupEntry) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntry.
+func (in *BackupEntry) DeepCopy() *BackupEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupEntry) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntryList) DeepCopyInto(out *BackupEntryList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]BackupEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryList.
+func (in *BackupEntryList) DeepCopy() *BackupEntryList {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntryList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupEntryList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntrySpec) DeepCopyInto(out *BackupEntrySpec) {
+ *out = *in
+ if in.SeedName != nil {
+ in, out := &in.SeedName, &out.SeedName
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntrySpec.
+func (in *BackupEntrySpec) DeepCopy() *BackupEntrySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntrySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntryStatus) DeepCopyInto(out *BackupEntryStatus) {
+ *out = *in
+ if in.LastOperation != nil {
+ in, out := &in.LastOperation, &out.LastOperation
+ *out = new(LastOperation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastError != nil {
+ in, out := &in.LastError, &out.LastError
+ *out = new(LastError)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryStatus.
+func (in *BackupEntryStatus) DeepCopy() *BackupEntryStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntryStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CRI) DeepCopyInto(out *CRI) {
+ *out = *in
+ if in.ContainerRuntimes != nil {
+ in, out := &in.ContainerRuntimes, &out.ContainerRuntimes
+ *out = make([]ContainerRuntime, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRI.
+func (in *CRI) DeepCopy() *CRI {
+ if in == nil {
+ return nil
+ }
+ out := new(CRI)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudInfo) DeepCopyInto(out *CloudInfo) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInfo.
+func (in *CloudInfo) DeepCopy() *CloudInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudProfile) DeepCopyInto(out *CloudProfile) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfile.
+func (in *CloudProfile) DeepCopy() *CloudProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CloudProfile) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudProfileList) DeepCopyInto(out *CloudProfileList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CloudProfile, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfileList.
+func (in *CloudProfileList) DeepCopy() *CloudProfileList {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudProfileList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CloudProfileList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudProfileSpec) DeepCopyInto(out *CloudProfileSpec) {
+ *out = *in
+ if in.CABundle != nil {
+ in, out := &in.CABundle, &out.CABundle
+ *out = new(string)
+ **out = **in
+ }
+ in.Kubernetes.DeepCopyInto(&out.Kubernetes)
+ if in.MachineImages != nil {
+ in, out := &in.MachineImages, &out.MachineImages
+ *out = make([]MachineImage, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.MachineTypes != nil {
+ in, out := &in.MachineTypes, &out.MachineTypes
+ *out = make([]MachineType, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Regions != nil {
+ in, out := &in.Regions, &out.Regions
+ *out = make([]Region, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SeedSelector != nil {
+ in, out := &in.SeedSelector, &out.SeedSelector
+ *out = new(SeedSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VolumeTypes != nil {
+ in, out := &in.VolumeTypes, &out.VolumeTypes
+ *out = make([]VolumeType, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfileSpec.
+func (in *CloudProfileSpec) DeepCopy() *CloudProfileSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudProfileSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterAutoscaler) DeepCopyInto(out *ClusterAutoscaler) {
+ *out = *in
+ if in.ScaleDownDelayAfterAdd != nil {
+ in, out := &in.ScaleDownDelayAfterAdd, &out.ScaleDownDelayAfterAdd
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ScaleDownDelayAfterDelete != nil {
+ in, out := &in.ScaleDownDelayAfterDelete, &out.ScaleDownDelayAfterDelete
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ScaleDownDelayAfterFailure != nil {
+ in, out := &in.ScaleDownDelayAfterFailure, &out.ScaleDownDelayAfterFailure
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ScaleDownUnneededTime != nil {
+ in, out := &in.ScaleDownUnneededTime, &out.ScaleDownUnneededTime
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ScaleDownUtilizationThreshold != nil {
+ in, out := &in.ScaleDownUtilizationThreshold, &out.ScaleDownUtilizationThreshold
+ *out = new(float64)
+ **out = **in
+ }
+ if in.ScanInterval != nil {
+ in, out := &in.ScanInterval, &out.ScanInterval
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscaler.
+func (in *ClusterAutoscaler) DeepCopy() *ClusterAutoscaler {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterAutoscaler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterInfo) DeepCopyInto(out *ClusterInfo) {
+ *out = *in
+ out.Cloud = in.Cloud
+ out.Kubernetes = in.Kubernetes
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInfo.
+func (in *ClusterInfo) DeepCopy() *ClusterInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Condition) DeepCopyInto(out *Condition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ if in.Codes != nil {
+ in, out := &in.Codes, &out.Codes
+ *out = make([]ErrorCode, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
+func (in *Condition) DeepCopy() *Condition {
+ if in == nil {
+ return nil
+ }
+ out := new(Condition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerRuntime) DeepCopyInto(out *ContainerRuntime) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRuntime.
+func (in *ContainerRuntime) DeepCopy() *ContainerRuntime {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerRuntime)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerDeployment) DeepCopyInto(out *ControllerDeployment) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Policy != nil {
+ in, out := &in.Policy, &out.Policy
+ *out = new(ControllerDeploymentPolicy)
+ **out = **in
+ }
+ if in.SeedSelector != nil {
+ in, out := &in.SeedSelector, &out.SeedSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerDeployment.
+func (in *ControllerDeployment) DeepCopy() *ControllerDeployment {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerDeployment)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallation) DeepCopyInto(out *ControllerInstallation) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallation.
+func (in *ControllerInstallation) DeepCopy() *ControllerInstallation {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerInstallation) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallationList) DeepCopyInto(out *ControllerInstallationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ControllerInstallation, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationList.
+func (in *ControllerInstallationList) DeepCopy() *ControllerInstallationList {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerInstallationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallationSpec) DeepCopyInto(out *ControllerInstallationSpec) {
+ *out = *in
+ out.RegistrationRef = in.RegistrationRef
+ out.SeedRef = in.SeedRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationSpec.
+func (in *ControllerInstallationSpec) DeepCopy() *ControllerInstallationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallationStatus) DeepCopyInto(out *ControllerInstallationStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ProviderStatus != nil {
+ in, out := &in.ProviderStatus, &out.ProviderStatus
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationStatus.
+func (in *ControllerInstallationStatus) DeepCopy() *ControllerInstallationStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallationStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRegistration) DeepCopyInto(out *ControllerRegistration) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistration.
+func (in *ControllerRegistration) DeepCopy() *ControllerRegistration {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRegistration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRegistration) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRegistrationList) DeepCopyInto(out *ControllerRegistrationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ControllerRegistration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistrationList.
+func (in *ControllerRegistrationList) DeepCopy() *ControllerRegistrationList {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRegistrationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRegistrationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRegistrationSpec) DeepCopyInto(out *ControllerRegistrationSpec) {
+ *out = *in
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]ControllerResource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Deployment != nil {
+ in, out := &in.Deployment, &out.Deployment
+ *out = new(ControllerDeployment)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistrationSpec.
+func (in *ControllerRegistrationSpec) DeepCopy() *ControllerRegistrationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRegistrationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerResource) DeepCopyInto(out *ControllerResource) {
+ *out = *in
+ if in.GloballyEnabled != nil {
+ in, out := &in.GloballyEnabled, &out.GloballyEnabled
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ReconcileTimeout != nil {
+ in, out := &in.ReconcileTimeout, &out.ReconcileTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.Primary != nil {
+ in, out := &in.Primary, &out.Primary
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerResource.
+func (in *ControllerResource) DeepCopy() *ControllerResource {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNS) DeepCopyInto(out *DNS) {
+ *out = *in
+ if in.Domain != nil {
+ in, out := &in.Domain, &out.Domain
+ *out = new(string)
+ **out = **in
+ }
+ if in.Providers != nil {
+ in, out := &in.Providers, &out.Providers
+ *out = make([]DNSProvider, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS.
+func (in *DNS) DeepCopy() *DNS {
+ if in == nil {
+ return nil
+ }
+ out := new(DNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSIncludeExclude) DeepCopyInto(out *DNSIncludeExclude) {
+ *out = *in
+ if in.Include != nil {
+ in, out := &in.Include, &out.Include
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Exclude != nil {
+ in, out := &in.Exclude, &out.Exclude
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSIncludeExclude.
+func (in *DNSIncludeExclude) DeepCopy() *DNSIncludeExclude {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSIncludeExclude)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSProvider) DeepCopyInto(out *DNSProvider) {
+ *out = *in
+ if in.Domains != nil {
+ in, out := &in.Domains, &out.Domains
+ *out = new(DNSIncludeExclude)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Primary != nil {
+ in, out := &in.Primary, &out.Primary
+ *out = new(bool)
+ **out = **in
+ }
+ if in.SecretName != nil {
+ in, out := &in.SecretName, &out.SecretName
+ *out = new(string)
+ **out = **in
+ }
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = new(DNSIncludeExclude)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProvider.
+func (in *DNSProvider) DeepCopy() *DNSProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DataVolume) DeepCopyInto(out *DataVolume) {
+ *out = *in
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ if in.Encrypted != nil {
+ in, out := &in.Encrypted, &out.Encrypted
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolume.
+func (in *DataVolume) DeepCopy() *DataVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(DataVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Endpoint) DeepCopyInto(out *Endpoint) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
+func (in *Endpoint) DeepCopy() *Endpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(Endpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExpirableVersion) DeepCopyInto(out *ExpirableVersion) {
+ *out = *in
+ if in.ExpirationDate != nil {
+ in, out := &in.ExpirationDate, &out.ExpirationDate
+ *out = (*in).DeepCopy()
+ }
+ if in.Classification != nil {
+ in, out := &in.Classification, &out.Classification
+ *out = new(VersionClassification)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpirableVersion.
+func (in *ExpirableVersion) DeepCopy() *ExpirableVersion {
+ if in == nil {
+ return nil
+ }
+ out := new(ExpirableVersion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Extension) DeepCopyInto(out *Extension) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Disabled != nil {
+ in, out := &in.Disabled, &out.Disabled
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extension.
+func (in *Extension) DeepCopy() *Extension {
+ if in == nil {
+ return nil
+ }
+ out := new(Extension)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtensionResourceState) DeepCopyInto(out *ExtensionResourceState) {
+ *out = *in
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ if in.Purpose != nil {
+ in, out := &in.Purpose, &out.Purpose
+ *out = new(string)
+ **out = **in
+ }
+ if in.State != nil {
+ in, out := &in.State, &out.State
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]NamedResourceReference, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionResourceState.
+func (in *ExtensionResourceState) DeepCopy() *ExtensionResourceState {
+ if in == nil {
+ return nil
+ }
+ out := new(ExtensionResourceState)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Gardener) DeepCopyInto(out *Gardener) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gardener.
+func (in *Gardener) DeepCopy() *Gardener {
+ if in == nil {
+ return nil
+ }
+ out := new(Gardener)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GardenerResourceData) DeepCopyInto(out *GardenerResourceData) {
+ *out = *in
+ in.Data.DeepCopyInto(&out.Data)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenerResourceData.
+func (in *GardenerResourceData) DeepCopy() *GardenerResourceData {
+ if in == nil {
+ return nil
+ }
+ out := new(GardenerResourceData)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Hibernation) DeepCopyInto(out *Hibernation) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Schedules != nil {
+ in, out := &in.Schedules, &out.Schedules
+ *out = make([]HibernationSchedule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hibernation.
+func (in *Hibernation) DeepCopy() *Hibernation {
+ if in == nil {
+ return nil
+ }
+ out := new(Hibernation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HibernationSchedule) DeepCopyInto(out *HibernationSchedule) {
+ *out = *in
+ if in.Start != nil {
+ in, out := &in.Start, &out.Start
+ *out = new(string)
+ **out = **in
+ }
+ if in.End != nil {
+ in, out := &in.End, &out.End
+ *out = new(string)
+ **out = **in
+ }
+ if in.Location != nil {
+ in, out := &in.Location, &out.Location
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HibernationSchedule.
+func (in *HibernationSchedule) DeepCopy() *HibernationSchedule {
+ if in == nil {
+ return nil
+ }
+ out := new(HibernationSchedule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerConfig) DeepCopyInto(out *HorizontalPodAutoscalerConfig) {
+ *out = *in
+ if in.CPUInitializationPeriod != nil {
+ in, out := &in.CPUInitializationPeriod, &out.CPUInitializationPeriod
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.DownscaleDelay != nil {
+ in, out := &in.DownscaleDelay, &out.DownscaleDelay
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.DownscaleStabilization != nil {
+ in, out := &in.DownscaleStabilization, &out.DownscaleStabilization
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.InitialReadinessDelay != nil {
+ in, out := &in.InitialReadinessDelay, &out.InitialReadinessDelay
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.SyncPeriod != nil {
+ in, out := &in.SyncPeriod, &out.SyncPeriod
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.Tolerance != nil {
+ in, out := &in.Tolerance, &out.Tolerance
+ *out = new(float64)
+ **out = **in
+ }
+ if in.UpscaleDelay != nil {
+ in, out := &in.UpscaleDelay, &out.UpscaleDelay
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerConfig.
+func (in *HorizontalPodAutoscalerConfig) DeepCopy() *HorizontalPodAutoscalerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(HorizontalPodAutoscalerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Ingress) DeepCopyInto(out *Ingress) {
+ *out = *in
+ in.Controller.DeepCopyInto(&out.Controller)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
+func (in *Ingress) DeepCopy() *Ingress {
+ if in == nil {
+ return nil
+ }
+ out := new(Ingress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressController) DeepCopyInto(out *IngressController) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController.
+func (in *IngressController) DeepCopy() *IngressController {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressController)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.AdmissionPlugins != nil {
+ in, out := &in.AdmissionPlugins, &out.AdmissionPlugins
+ *out = make([]AdmissionPlugin, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.APIAudiences != nil {
+ in, out := &in.APIAudiences, &out.APIAudiences
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.AuditConfig != nil {
+ in, out := &in.AuditConfig, &out.AuditConfig
+ *out = new(AuditConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EnableBasicAuthentication != nil {
+ in, out := &in.EnableBasicAuthentication, &out.EnableBasicAuthentication
+ *out = new(bool)
+ **out = **in
+ }
+ if in.OIDCConfig != nil {
+ in, out := &in.OIDCConfig, &out.OIDCConfig
+ *out = new(OIDCConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RuntimeConfig != nil {
+ in, out := &in.RuntimeConfig, &out.RuntimeConfig
+ *out = make(map[string]bool, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ServiceAccountConfig != nil {
+ in, out := &in.ServiceAccountConfig, &out.ServiceAccountConfig
+ *out = new(ServiceAccountConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.WatchCacheSizes != nil {
+ in, out := &in.WatchCacheSizes, &out.WatchCacheSizes
+ *out = new(WatchCacheSizes)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Requests != nil {
+ in, out := &in.Requests, &out.Requests
+ *out = new(KubeAPIServerRequests)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig.
+func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerRequests) DeepCopyInto(out *KubeAPIServerRequests) {
+ *out = *in
+ if in.MaxNonMutatingInflight != nil {
+ in, out := &in.MaxNonMutatingInflight, &out.MaxNonMutatingInflight
+ *out = new(int32)
+ **out = **in
+ }
+ if in.MaxMutatingInflight != nil {
+ in, out := &in.MaxMutatingInflight, &out.MaxMutatingInflight
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerRequests.
+func (in *KubeAPIServerRequests) DeepCopy() *KubeAPIServerRequests {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServerRequests)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.HorizontalPodAutoscalerConfig != nil {
+ in, out := &in.HorizontalPodAutoscalerConfig, &out.HorizontalPodAutoscalerConfig
+ *out = new(HorizontalPodAutoscalerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NodeCIDRMaskSize != nil {
+ in, out := &in.NodeCIDRMaskSize, &out.NodeCIDRMaskSize
+ *out = new(int32)
+ **out = **in
+ }
+ if in.PodEvictionTimeout != nil {
+ in, out := &in.PodEvictionTimeout, &out.PodEvictionTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerConfig.
+func (in *KubeControllerManagerConfig) DeepCopy() *KubeControllerManagerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeControllerManagerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeProxyConfig) DeepCopyInto(out *KubeProxyConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.Mode != nil {
+ in, out := &in.Mode, &out.Mode
+ *out = new(ProxyMode)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfig.
+func (in *KubeProxyConfig) DeepCopy() *KubeProxyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeProxyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.KubeMaxPDVols != nil {
+ in, out := &in.KubeMaxPDVols, &out.KubeMaxPDVols
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfig.
+func (in *KubeSchedulerConfig) DeepCopy() *KubeSchedulerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeSchedulerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfig) DeepCopyInto(out *KubeletConfig) {
+ *out = *in
+ in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig)
+ if in.CPUCFSQuota != nil {
+ in, out := &in.CPUCFSQuota, &out.CPUCFSQuota
+ *out = new(bool)
+ **out = **in
+ }
+ if in.CPUManagerPolicy != nil {
+ in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy
+ *out = new(string)
+ **out = **in
+ }
+ if in.EvictionHard != nil {
+ in, out := &in.EvictionHard, &out.EvictionHard
+ *out = new(KubeletConfigEviction)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EvictionMaxPodGracePeriod != nil {
+ in, out := &in.EvictionMaxPodGracePeriod, &out.EvictionMaxPodGracePeriod
+ *out = new(int32)
+ **out = **in
+ }
+ if in.EvictionMinimumReclaim != nil {
+ in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim
+ *out = new(KubeletConfigEvictionMinimumReclaim)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EvictionPressureTransitionPeriod != nil {
+ in, out := &in.EvictionPressureTransitionPeriod, &out.EvictionPressureTransitionPeriod
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.EvictionSoft != nil {
+ in, out := &in.EvictionSoft, &out.EvictionSoft
+ *out = new(KubeletConfigEviction)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EvictionSoftGracePeriod != nil {
+ in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod
+ *out = new(KubeletConfigEvictionSoftGracePeriod)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.MaxPods != nil {
+ in, out := &in.MaxPods, &out.MaxPods
+ *out = new(int32)
+ **out = **in
+ }
+ if in.PodPIDsLimit != nil {
+ in, out := &in.PodPIDsLimit, &out.PodPIDsLimit
+ *out = new(int64)
+ **out = **in
+ }
+ if in.ImagePullProgressDeadline != nil {
+ in, out := &in.ImagePullProgressDeadline, &out.ImagePullProgressDeadline
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.FailSwapOn != nil {
+ in, out := &in.FailSwapOn, &out.FailSwapOn
+ *out = new(bool)
+ **out = **in
+ }
+ if in.KubeReserved != nil {
+ in, out := &in.KubeReserved, &out.KubeReserved
+ *out = new(KubeletConfigReserved)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SystemReserved != nil {
+ in, out := &in.SystemReserved, &out.SystemReserved
+ *out = new(KubeletConfigReserved)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfig.
+func (in *KubeletConfig) DeepCopy() *KubeletConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfigEviction) DeepCopyInto(out *KubeletConfigEviction) {
+ *out = *in
+ if in.MemoryAvailable != nil {
+ in, out := &in.MemoryAvailable, &out.MemoryAvailable
+ *out = new(string)
+ **out = **in
+ }
+ if in.ImageFSAvailable != nil {
+ in, out := &in.ImageFSAvailable, &out.ImageFSAvailable
+ *out = new(string)
+ **out = **in
+ }
+ if in.ImageFSInodesFree != nil {
+ in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree
+ *out = new(string)
+ **out = **in
+ }
+ if in.NodeFSAvailable != nil {
+ in, out := &in.NodeFSAvailable, &out.NodeFSAvailable
+ *out = new(string)
+ **out = **in
+ }
+ if in.NodeFSInodesFree != nil {
+ in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEviction.
+func (in *KubeletConfigEviction) DeepCopy() *KubeletConfigEviction {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfigEviction)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfigEvictionMinimumReclaim) DeepCopyInto(out *KubeletConfigEvictionMinimumReclaim) {
+ *out = *in
+ if in.MemoryAvailable != nil {
+ in, out := &in.MemoryAvailable, &out.MemoryAvailable
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.ImageFSAvailable != nil {
+ in, out := &in.ImageFSAvailable, &out.ImageFSAvailable
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.ImageFSInodesFree != nil {
+ in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.NodeFSAvailable != nil {
+ in, out := &in.NodeFSAvailable, &out.NodeFSAvailable
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.NodeFSInodesFree != nil {
+ in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEvictionMinimumReclaim.
+func (in *KubeletConfigEvictionMinimumReclaim) DeepCopy() *KubeletConfigEvictionMinimumReclaim {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfigEvictionMinimumReclaim)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopyInto(out *KubeletConfigEvictionSoftGracePeriod) {
+ *out = *in
+ if in.MemoryAvailable != nil {
+ in, out := &in.MemoryAvailable, &out.MemoryAvailable
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ImageFSAvailable != nil {
+ in, out := &in.ImageFSAvailable, &out.ImageFSAvailable
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ImageFSInodesFree != nil {
+ in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.NodeFSAvailable != nil {
+ in, out := &in.NodeFSAvailable, &out.NodeFSAvailable
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.NodeFSInodesFree != nil {
+ in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEvictionSoftGracePeriod.
+func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopy() *KubeletConfigEvictionSoftGracePeriod {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfigEvictionSoftGracePeriod)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConfigReserved) DeepCopyInto(out *KubeletConfigReserved) {
+ *out = *in
+ if in.CPU != nil {
+ in, out := &in.CPU, &out.CPU
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.Memory != nil {
+ in, out := &in.Memory, &out.Memory
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.EphemeralStorage != nil {
+ in, out := &in.EphemeralStorage, &out.EphemeralStorage
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.PID != nil {
+ in, out := &in.PID, &out.PID
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigReserved.
+func (in *KubeletConfigReserved) DeepCopy() *KubeletConfigReserved {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConfigReserved)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Kubernetes) DeepCopyInto(out *Kubernetes) {
+ *out = *in
+ if in.AllowPrivilegedContainers != nil {
+ in, out := &in.AllowPrivilegedContainers, &out.AllowPrivilegedContainers
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ClusterAutoscaler != nil {
+ in, out := &in.ClusterAutoscaler, &out.ClusterAutoscaler
+ *out = new(ClusterAutoscaler)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeAPIServer != nil {
+ in, out := &in.KubeAPIServer, &out.KubeAPIServer
+ *out = new(KubeAPIServerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeControllerManager != nil {
+ in, out := &in.KubeControllerManager, &out.KubeControllerManager
+ *out = new(KubeControllerManagerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeScheduler != nil {
+ in, out := &in.KubeScheduler, &out.KubeScheduler
+ *out = new(KubeSchedulerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeProxy != nil {
+ in, out := &in.KubeProxy, &out.KubeProxy
+ *out = new(KubeProxyConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Kubelet != nil {
+ in, out := &in.Kubelet, &out.Kubelet
+ *out = new(KubeletConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VerticalPodAutoscaler != nil {
+ in, out := &in.VerticalPodAutoscaler, &out.VerticalPodAutoscaler
+ *out = new(VerticalPodAutoscaler)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kubernetes.
+func (in *Kubernetes) DeepCopy() *Kubernetes {
+ if in == nil {
+ return nil
+ }
+ out := new(Kubernetes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesConfig) DeepCopyInto(out *KubernetesConfig) {
+ *out = *in
+ if in.FeatureGates != nil {
+ in, out := &in.FeatureGates, &out.FeatureGates
+ *out = make(map[string]bool, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesConfig.
+func (in *KubernetesConfig) DeepCopy() *KubernetesConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesDashboard) DeepCopyInto(out *KubernetesDashboard) {
+ *out = *in
+ out.Addon = in.Addon
+ if in.AuthenticationMode != nil {
+ in, out := &in.AuthenticationMode, &out.AuthenticationMode
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesDashboard.
+func (in *KubernetesDashboard) DeepCopy() *KubernetesDashboard {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesDashboard)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesInfo) DeepCopyInto(out *KubernetesInfo) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesInfo.
+func (in *KubernetesInfo) DeepCopy() *KubernetesInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesSettings) DeepCopyInto(out *KubernetesSettings) {
+ *out = *in
+ if in.Versions != nil {
+ in, out := &in.Versions, &out.Versions
+ *out = make([]ExpirableVersion, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSettings.
+func (in *KubernetesSettings) DeepCopy() *KubernetesSettings {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesSettings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LastError) DeepCopyInto(out *LastError) {
+ *out = *in
+ if in.TaskID != nil {
+ in, out := &in.TaskID, &out.TaskID
+ *out = new(string)
+ **out = **in
+ }
+ if in.Codes != nil {
+ in, out := &in.Codes, &out.Codes
+ *out = make([]ErrorCode, len(*in))
+ copy(*out, *in)
+ }
+ if in.LastUpdateTime != nil {
+ in, out := &in.LastUpdateTime, &out.LastUpdateTime
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastError.
+func (in *LastError) DeepCopy() *LastError {
+ if in == nil {
+ return nil
+ }
+ out := new(LastError)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LastOperation) DeepCopyInto(out *LastOperation) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation.
+func (in *LastOperation) DeepCopy() *LastOperation {
+ if in == nil {
+ return nil
+ }
+ out := new(LastOperation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Machine) DeepCopyInto(out *Machine) {
+ *out = *in
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(ShootMachineImage)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine.
+func (in *Machine) DeepCopy() *Machine {
+ if in == nil {
+ return nil
+ }
+ out := new(Machine)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineControllerManagerSettings) DeepCopyInto(out *MachineControllerManagerSettings) {
+ *out = *in
+ if in.MachineDrainTimeout != nil {
+ in, out := &in.MachineDrainTimeout, &out.MachineDrainTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.MachineHealthTimeout != nil {
+ in, out := &in.MachineHealthTimeout, &out.MachineHealthTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.MachineCreationTimeout != nil {
+ in, out := &in.MachineCreationTimeout, &out.MachineCreationTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.MaxEvictRetries != nil {
+ in, out := &in.MaxEvictRetries, &out.MaxEvictRetries
+ *out = new(int32)
+ **out = **in
+ }
+ if in.NodeConditions != nil {
+ in, out := &in.NodeConditions, &out.NodeConditions
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineControllerManagerSettings.
+func (in *MachineControllerManagerSettings) DeepCopy() *MachineControllerManagerSettings {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineControllerManagerSettings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineImage) DeepCopyInto(out *MachineImage) {
+ *out = *in
+ if in.Versions != nil {
+ in, out := &in.Versions, &out.Versions
+ *out = make([]MachineImageVersion, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImage.
+func (in *MachineImage) DeepCopy() *MachineImage {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineImage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineImageVersion) DeepCopyInto(out *MachineImageVersion) {
+ *out = *in
+ in.ExpirableVersion.DeepCopyInto(&out.ExpirableVersion)
+ if in.CRI != nil {
+ in, out := &in.CRI, &out.CRI
+ *out = make([]CRI, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImageVersion.
+func (in *MachineImageVersion) DeepCopy() *MachineImageVersion {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineImageVersion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineType) DeepCopyInto(out *MachineType) {
+ *out = *in
+ out.CPU = in.CPU.DeepCopy()
+ out.GPU = in.GPU.DeepCopy()
+ out.Memory = in.Memory.DeepCopy()
+ if in.Storage != nil {
+ in, out := &in.Storage, &out.Storage
+ *out = new(MachineTypeStorage)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Usable != nil {
+ in, out := &in.Usable, &out.Usable
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineType.
+func (in *MachineType) DeepCopy() *MachineType {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineType)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineTypeStorage) DeepCopyInto(out *MachineTypeStorage) {
+ *out = *in
+ out.StorageSize = in.StorageSize.DeepCopy()
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTypeStorage.
+func (in *MachineTypeStorage) DeepCopy() *MachineTypeStorage {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineTypeStorage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Maintenance) DeepCopyInto(out *Maintenance) {
+ *out = *in
+ if in.AutoUpdate != nil {
+ in, out := &in.AutoUpdate, &out.AutoUpdate
+ *out = new(MaintenanceAutoUpdate)
+ **out = **in
+ }
+ if in.TimeWindow != nil {
+ in, out := &in.TimeWindow, &out.TimeWindow
+ *out = new(MaintenanceTimeWindow)
+ **out = **in
+ }
+ if in.ConfineSpecUpdateRollout != nil {
+ in, out := &in.ConfineSpecUpdateRollout, &out.ConfineSpecUpdateRollout
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintenance.
+func (in *Maintenance) DeepCopy() *Maintenance {
+ if in == nil {
+ return nil
+ }
+ out := new(Maintenance)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MaintenanceAutoUpdate) DeepCopyInto(out *MaintenanceAutoUpdate) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceAutoUpdate.
+func (in *MaintenanceAutoUpdate) DeepCopy() *MaintenanceAutoUpdate {
+ if in == nil {
+ return nil
+ }
+ out := new(MaintenanceAutoUpdate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MaintenanceTimeWindow) DeepCopyInto(out *MaintenanceTimeWindow) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceTimeWindow.
+func (in *MaintenanceTimeWindow) DeepCopy() *MaintenanceTimeWindow {
+ if in == nil {
+ return nil
+ }
+ out := new(MaintenanceTimeWindow)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Monitoring) DeepCopyInto(out *Monitoring) {
+ *out = *in
+ if in.Alerting != nil {
+ in, out := &in.Alerting, &out.Alerting
+ *out = new(Alerting)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Monitoring.
+func (in *Monitoring) DeepCopy() *Monitoring {
+ if in == nil {
+ return nil
+ }
+ out := new(Monitoring)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedResourceReference) DeepCopyInto(out *NamedResourceReference) {
+ *out = *in
+ out.ResourceRef = in.ResourceRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourceReference.
+func (in *NamedResourceReference) DeepCopy() *NamedResourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedResourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Networking) DeepCopyInto(out *Networking) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Pods != nil {
+ in, out := &in.Pods, &out.Pods
+ *out = new(string)
+ **out = **in
+ }
+ if in.Nodes != nil {
+ in, out := &in.Nodes, &out.Nodes
+ *out = new(string)
+ **out = **in
+ }
+ if in.Services != nil {
+ in, out := &in.Services, &out.Services
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking.
+func (in *Networking) DeepCopy() *Networking {
+ if in == nil {
+ return nil
+ }
+ out := new(Networking)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NginxIngress) DeepCopyInto(out *NginxIngress) {
+ *out = *in
+ out.Addon = in.Addon
+ if in.LoadBalancerSourceRanges != nil {
+ in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ExternalTrafficPolicy != nil {
+ in, out := &in.ExternalTrafficPolicy, &out.ExternalTrafficPolicy
+ *out = new(v1.ServiceExternalTrafficPolicyType)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxIngress.
+func (in *NginxIngress) DeepCopy() *NginxIngress {
+ if in == nil {
+ return nil
+ }
+ out := new(NginxIngress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OIDCConfig) DeepCopyInto(out *OIDCConfig) {
+ *out = *in
+ if in.CABundle != nil {
+ in, out := &in.CABundle, &out.CABundle
+ *out = new(string)
+ **out = **in
+ }
+ if in.ClientAuthentication != nil {
+ in, out := &in.ClientAuthentication, &out.ClientAuthentication
+ *out = new(OpenIDConnectClientAuthentication)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ClientID != nil {
+ in, out := &in.ClientID, &out.ClientID
+ *out = new(string)
+ **out = **in
+ }
+ if in.GroupsClaim != nil {
+ in, out := &in.GroupsClaim, &out.GroupsClaim
+ *out = new(string)
+ **out = **in
+ }
+ if in.GroupsPrefix != nil {
+ in, out := &in.GroupsPrefix, &out.GroupsPrefix
+ *out = new(string)
+ **out = **in
+ }
+ if in.IssuerURL != nil {
+ in, out := &in.IssuerURL, &out.IssuerURL
+ *out = new(string)
+ **out = **in
+ }
+ if in.RequiredClaims != nil {
+ in, out := &in.RequiredClaims, &out.RequiredClaims
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.SigningAlgs != nil {
+ in, out := &in.SigningAlgs, &out.SigningAlgs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UsernameClaim != nil {
+ in, out := &in.UsernameClaim, &out.UsernameClaim
+ *out = new(string)
+ **out = **in
+ }
+ if in.UsernamePrefix != nil {
+ in, out := &in.UsernamePrefix, &out.UsernamePrefix
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCConfig.
+func (in *OIDCConfig) DeepCopy() *OIDCConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OIDCConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDConnectClientAuthentication) DeepCopyInto(out *OpenIDConnectClientAuthentication) {
+ *out = *in
+ if in.ExtraConfig != nil {
+ in, out := &in.ExtraConfig, &out.ExtraConfig
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Secret != nil {
+ in, out := &in.Secret, &out.Secret
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectClientAuthentication.
+func (in *OpenIDConnectClientAuthentication) DeepCopy() *OpenIDConnectClientAuthentication {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDConnectClientAuthentication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Plant) DeepCopyInto(out *Plant) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plant.
+func (in *Plant) DeepCopy() *Plant {
+ if in == nil {
+ return nil
+ }
+ out := new(Plant)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Plant) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlantList) DeepCopyInto(out *PlantList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Plant, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantList.
+func (in *PlantList) DeepCopy() *PlantList {
+ if in == nil {
+ return nil
+ }
+ out := new(PlantList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PlantList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlantSpec) DeepCopyInto(out *PlantSpec) {
+ *out = *in
+ out.SecretRef = in.SecretRef
+ if in.Endpoints != nil {
+ in, out := &in.Endpoints, &out.Endpoints
+ *out = make([]Endpoint, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantSpec.
+func (in *PlantSpec) DeepCopy() *PlantSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PlantSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlantStatus) DeepCopyInto(out *PlantStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ObservedGeneration != nil {
+ in, out := &in.ObservedGeneration, &out.ObservedGeneration
+ *out = new(int64)
+ **out = **in
+ }
+ if in.ClusterInfo != nil {
+ in, out := &in.ClusterInfo, &out.ClusterInfo
+ *out = new(ClusterInfo)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantStatus.
+func (in *PlantStatus) DeepCopy() *PlantStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PlantStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Project) DeepCopyInto(out *Project) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project.
+func (in *Project) DeepCopy() *Project {
+ if in == nil {
+ return nil
+ }
+ out := new(Project)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Project) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectList) DeepCopyInto(out *ProjectList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Project, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList.
+func (in *ProjectList) DeepCopy() *ProjectList {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProjectList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectMember) DeepCopyInto(out *ProjectMember) {
+ *out = *in
+ out.Subject = in.Subject
+ if in.Roles != nil {
+ in, out := &in.Roles, &out.Roles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectMember.
+func (in *ProjectMember) DeepCopy() *ProjectMember {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectMember)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
+ *out = *in
+ if in.CreatedBy != nil {
+ in, out := &in.CreatedBy, &out.CreatedBy
+ *out = new(rbacv1.Subject)
+ **out = **in
+ }
+ if in.Description != nil {
+ in, out := &in.Description, &out.Description
+ *out = new(string)
+ **out = **in
+ }
+ if in.Owner != nil {
+ in, out := &in.Owner, &out.Owner
+ *out = new(rbacv1.Subject)
+ **out = **in
+ }
+ if in.Purpose != nil {
+ in, out := &in.Purpose, &out.Purpose
+ *out = new(string)
+ **out = **in
+ }
+ if in.Members != nil {
+ in, out := &in.Members, &out.Members
+ *out = make([]ProjectMember, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Namespace != nil {
+ in, out := &in.Namespace, &out.Namespace
+ *out = new(string)
+ **out = **in
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = new(ProjectTolerations)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec.
+func (in *ProjectSpec) DeepCopy() *ProjectSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) {
+ *out = *in
+ if in.StaleSinceTimestamp != nil {
+ in, out := &in.StaleSinceTimestamp, &out.StaleSinceTimestamp
+ *out = (*in).DeepCopy()
+ }
+ if in.StaleAutoDeleteTimestamp != nil {
+ in, out := &in.StaleAutoDeleteTimestamp, &out.StaleAutoDeleteTimestamp
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus.
+func (in *ProjectStatus) DeepCopy() *ProjectStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectTolerations) DeepCopyInto(out *ProjectTolerations) {
+ *out = *in
+ if in.Defaults != nil {
+ in, out := &in.Defaults, &out.Defaults
+ *out = make([]Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Whitelist != nil {
+ in, out := &in.Whitelist, &out.Whitelist
+ *out = make([]Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectTolerations.
+func (in *ProjectTolerations) DeepCopy() *ProjectTolerations {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectTolerations)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Provider) DeepCopyInto(out *Provider) {
+ *out = *in
+ if in.ControlPlaneConfig != nil {
+ in, out := &in.ControlPlaneConfig, &out.ControlPlaneConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.InfrastructureConfig != nil {
+ in, out := &in.InfrastructureConfig, &out.InfrastructureConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Workers != nil {
+ in, out := &in.Workers, &out.Workers
+ *out = make([]Worker, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provider.
+func (in *Provider) DeepCopy() *Provider {
+ if in == nil {
+ return nil
+ }
+ out := new(Provider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Quota) DeepCopyInto(out *Quota) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Quota.
+func (in *Quota) DeepCopy() *Quota {
+ if in == nil {
+ return nil
+ }
+ out := new(Quota)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Quota) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuotaList) DeepCopyInto(out *QuotaList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Quota, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaList.
+func (in *QuotaList) DeepCopy() *QuotaList {
+ if in == nil {
+ return nil
+ }
+ out := new(QuotaList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *QuotaList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuotaSpec) DeepCopyInto(out *QuotaSpec) {
+ *out = *in
+ if in.ClusterLifetimeDays != nil {
+ in, out := &in.ClusterLifetimeDays, &out.ClusterLifetimeDays
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Metrics != nil {
+ in, out := &in.Metrics, &out.Metrics
+ *out = make(v1.ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+ out.Scope = in.Scope
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaSpec.
+func (in *QuotaSpec) DeepCopy() *QuotaSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(QuotaSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Region) DeepCopyInto(out *Region) {
+ *out = *in
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = make([]AvailabilityZone, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Region.
+func (in *Region) DeepCopy() *Region {
+ if in == nil {
+ return nil
+ }
+ out := new(Region)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceData) DeepCopyInto(out *ResourceData) {
+ *out = *in
+ out.CrossVersionObjectReference = in.CrossVersionObjectReference
+ in.Data.DeepCopyInto(&out.Data)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceData.
+func (in *ResourceData) DeepCopy() *ResourceData {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceData)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceWatchCacheSize) DeepCopyInto(out *ResourceWatchCacheSize) {
+ *out = *in
+ if in.APIGroup != nil {
+ in, out := &in.APIGroup, &out.APIGroup
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceWatchCacheSize.
+func (in *ResourceWatchCacheSize) DeepCopy() *ResourceWatchCacheSize {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceWatchCacheSize)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretBinding) DeepCopyInto(out *SecretBinding) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.SecretRef = in.SecretRef
+ if in.Quotas != nil {
+ in, out := &in.Quotas, &out.Quotas
+ *out = make([]v1.ObjectReference, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBinding.
+func (in *SecretBinding) DeepCopy() *SecretBinding {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretBinding)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecretBinding) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretBindingList) DeepCopyInto(out *SecretBindingList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]SecretBinding, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBindingList.
+func (in *SecretBindingList) DeepCopy() *SecretBindingList {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretBindingList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecretBindingList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Seed) DeepCopyInto(out *Seed) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Seed.
+func (in *Seed) DeepCopy() *Seed {
+ if in == nil {
+ return nil
+ }
+ out := new(Seed)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Seed) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedBackup) DeepCopyInto(out *SeedBackup) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Region != nil {
+ in, out := &in.Region, &out.Region
+ *out = new(string)
+ **out = **in
+ }
+ out.SecretRef = in.SecretRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedBackup.
+func (in *SeedBackup) DeepCopy() *SeedBackup {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedBackup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedDNS) DeepCopyInto(out *SeedDNS) {
+ *out = *in
+ if in.IngressDomain != nil {
+ in, out := &in.IngressDomain, &out.IngressDomain
+ *out = new(string)
+ **out = **in
+ }
+ if in.Provider != nil {
+ in, out := &in.Provider, &out.Provider
+ *out = new(SeedDNSProvider)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNS.
+func (in *SeedDNS) DeepCopy() *SeedDNS {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedDNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedDNSProvider) DeepCopyInto(out *SeedDNSProvider) {
+ *out = *in
+ out.SecretRef = in.SecretRef
+ if in.Domains != nil {
+ in, out := &in.Domains, &out.Domains
+ *out = new(DNSIncludeExclude)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = new(DNSIncludeExclude)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNSProvider.
+func (in *SeedDNSProvider) DeepCopy() *SeedDNSProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedDNSProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedList) DeepCopyInto(out *SeedList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Seed, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedList.
+func (in *SeedList) DeepCopy() *SeedList {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SeedList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedNetworks) DeepCopyInto(out *SeedNetworks) {
+ *out = *in
+ if in.Nodes != nil {
+ in, out := &in.Nodes, &out.Nodes
+ *out = new(string)
+ **out = **in
+ }
+ if in.ShootDefaults != nil {
+ in, out := &in.ShootDefaults, &out.ShootDefaults
+ *out = new(ShootNetworks)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.BlockCIDRs != nil {
+ in, out := &in.BlockCIDRs, &out.BlockCIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedNetworks.
+func (in *SeedNetworks) DeepCopy() *SeedNetworks {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedNetworks)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedProvider) DeepCopyInto(out *SeedProvider) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedProvider.
+func (in *SeedProvider) DeepCopy() *SeedProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSelector) DeepCopyInto(out *SeedSelector) {
+ *out = *in
+ if in.LabelSelector != nil {
+ in, out := &in.LabelSelector, &out.LabelSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ProviderTypes != nil {
+ in, out := &in.ProviderTypes, &out.ProviderTypes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSelector.
+func (in *SeedSelector) DeepCopy() *SeedSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingExcessCapacityReservation) DeepCopyInto(out *SeedSettingExcessCapacityReservation) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingExcessCapacityReservation.
+func (in *SeedSettingExcessCapacityReservation) DeepCopy() *SeedSettingExcessCapacityReservation {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingExcessCapacityReservation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingLoadBalancerServices) DeepCopyInto(out *SeedSettingLoadBalancerServices) {
+ *out = *in
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingLoadBalancerServices.
+func (in *SeedSettingLoadBalancerServices) DeepCopy() *SeedSettingLoadBalancerServices {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingLoadBalancerServices)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingScheduling) DeepCopyInto(out *SeedSettingScheduling) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingScheduling.
+func (in *SeedSettingScheduling) DeepCopy() *SeedSettingScheduling {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingScheduling)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingShootDNS) DeepCopyInto(out *SeedSettingShootDNS) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingShootDNS.
+func (in *SeedSettingShootDNS) DeepCopy() *SeedSettingShootDNS {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingShootDNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettingVerticalPodAutoscaler) DeepCopyInto(out *SeedSettingVerticalPodAutoscaler) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingVerticalPodAutoscaler.
+func (in *SeedSettingVerticalPodAutoscaler) DeepCopy() *SeedSettingVerticalPodAutoscaler {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettingVerticalPodAutoscaler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSettings) DeepCopyInto(out *SeedSettings) {
+ *out = *in
+ if in.ExcessCapacityReservation != nil {
+ in, out := &in.ExcessCapacityReservation, &out.ExcessCapacityReservation
+ *out = new(SeedSettingExcessCapacityReservation)
+ **out = **in
+ }
+ if in.Scheduling != nil {
+ in, out := &in.Scheduling, &out.Scheduling
+ *out = new(SeedSettingScheduling)
+ **out = **in
+ }
+ if in.ShootDNS != nil {
+ in, out := &in.ShootDNS, &out.ShootDNS
+ *out = new(SeedSettingShootDNS)
+ **out = **in
+ }
+ if in.LoadBalancerServices != nil {
+ in, out := &in.LoadBalancerServices, &out.LoadBalancerServices
+ *out = new(SeedSettingLoadBalancerServices)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VerticalPodAutoscaler != nil {
+ in, out := &in.VerticalPodAutoscaler, &out.VerticalPodAutoscaler
+ *out = new(SeedSettingVerticalPodAutoscaler)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettings.
+func (in *SeedSettings) DeepCopy() *SeedSettings {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSettings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedSpec) DeepCopyInto(out *SeedSpec) {
+ *out = *in
+ if in.Backup != nil {
+ in, out := &in.Backup, &out.Backup
+ *out = new(SeedBackup)
+ (*in).DeepCopyInto(*out)
+ }
+ in.DNS.DeepCopyInto(&out.DNS)
+ in.Networks.DeepCopyInto(&out.Networks)
+ in.Provider.DeepCopyInto(&out.Provider)
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ if in.Settings != nil {
+ in, out := &in.Settings, &out.Settings
+ *out = new(SeedSettings)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Taints != nil {
+ in, out := &in.Taints, &out.Taints
+ *out = make([]SeedTaint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Volume != nil {
+ in, out := &in.Volume, &out.Volume
+ *out = new(SeedVolume)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = new(Ingress)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSpec.
+func (in *SeedSpec) DeepCopy() *SeedSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedStatus) DeepCopyInto(out *SeedStatus) {
+ *out = *in
+ if in.Gardener != nil {
+ in, out := &in.Gardener, &out.Gardener
+ *out = new(Gardener)
+ **out = **in
+ }
+ if in.KubernetesVersion != nil {
+ in, out := &in.KubernetesVersion, &out.KubernetesVersion
+ *out = new(string)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ClusterIdentity != nil {
+ in, out := &in.ClusterIdentity, &out.ClusterIdentity
+ *out = new(string)
+ **out = **in
+ }
+ if in.Capacity != nil {
+ in, out := &in.Capacity, &out.Capacity
+ *out = make(v1.ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+ if in.Allocatable != nil {
+ in, out := &in.Allocatable, &out.Allocatable
+ *out = make(v1.ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedStatus.
+func (in *SeedStatus) DeepCopy() *SeedStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedTaint) DeepCopyInto(out *SeedTaint) {
+ *out = *in
+ if in.Value != nil {
+ in, out := &in.Value, &out.Value
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedTaint.
+func (in *SeedTaint) DeepCopy() *SeedTaint {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedTaint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedVolume) DeepCopyInto(out *SeedVolume) {
+ *out = *in
+ if in.MinimumSize != nil {
+ in, out := &in.MinimumSize, &out.MinimumSize
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.Providers != nil {
+ in, out := &in.Providers, &out.Providers
+ *out = make([]SeedVolumeProvider, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedVolume.
+func (in *SeedVolume) DeepCopy() *SeedVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedVolumeProvider) DeepCopyInto(out *SeedVolumeProvider) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedVolumeProvider.
+func (in *SeedVolumeProvider) DeepCopy() *SeedVolumeProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedVolumeProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountConfig) DeepCopyInto(out *ServiceAccountConfig) {
+ *out = *in
+ if in.Issuer != nil {
+ in, out := &in.Issuer, &out.Issuer
+ *out = new(string)
+ **out = **in
+ }
+ if in.SigningKeySecret != nil {
+ in, out := &in.SigningKeySecret, &out.SigningKeySecret
+ *out = new(v1.LocalObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountConfig.
+func (in *ServiceAccountConfig) DeepCopy() *ServiceAccountConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceAccountConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Shoot) DeepCopyInto(out *Shoot) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Shoot.
+func (in *Shoot) DeepCopy() *Shoot {
+ if in == nil {
+ return nil
+ }
+ out := new(Shoot)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Shoot) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootList) DeepCopyInto(out *ShootList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Shoot, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootList.
+func (in *ShootList) DeepCopy() *ShootList {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ShootList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootMachineImage) DeepCopyInto(out *ShootMachineImage) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootMachineImage.
+func (in *ShootMachineImage) DeepCopy() *ShootMachineImage {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootMachineImage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootNetworks) DeepCopyInto(out *ShootNetworks) {
+ *out = *in
+ if in.Pods != nil {
+ in, out := &in.Pods, &out.Pods
+ *out = new(string)
+ **out = **in
+ }
+ if in.Services != nil {
+ in, out := &in.Services, &out.Services
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootNetworks.
+func (in *ShootNetworks) DeepCopy() *ShootNetworks {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootNetworks)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootSpec) DeepCopyInto(out *ShootSpec) {
+ *out = *in
+ if in.Addons != nil {
+ in, out := &in.Addons, &out.Addons
+ *out = new(Addons)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DNS != nil {
+ in, out := &in.DNS, &out.DNS
+ *out = new(DNS)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Extensions != nil {
+ in, out := &in.Extensions, &out.Extensions
+ *out = make([]Extension, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Hibernation != nil {
+ in, out := &in.Hibernation, &out.Hibernation
+ *out = new(Hibernation)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Kubernetes.DeepCopyInto(&out.Kubernetes)
+ in.Networking.DeepCopyInto(&out.Networking)
+ if in.Maintenance != nil {
+ in, out := &in.Maintenance, &out.Maintenance
+ *out = new(Maintenance)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Monitoring != nil {
+ in, out := &in.Monitoring, &out.Monitoring
+ *out = new(Monitoring)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Provider.DeepCopyInto(&out.Provider)
+ if in.Purpose != nil {
+ in, out := &in.Purpose, &out.Purpose
+ *out = new(ShootPurpose)
+ **out = **in
+ }
+ if in.SeedName != nil {
+ in, out := &in.SeedName, &out.SeedName
+ *out = new(string)
+ **out = **in
+ }
+ if in.SeedSelector != nil {
+ in, out := &in.SeedSelector, &out.SeedSelector
+ *out = new(SeedSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]NamedResourceReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootSpec.
+func (in *ShootSpec) DeepCopy() *ShootSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootState) DeepCopyInto(out *ShootState) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootState.
+func (in *ShootState) DeepCopy() *ShootState {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootState)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ShootState) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootStateList) DeepCopyInto(out *ShootStateList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ShootState, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStateList.
+func (in *ShootStateList) DeepCopy() *ShootStateList {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootStateList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ShootStateList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootStateSpec) DeepCopyInto(out *ShootStateSpec) {
+ *out = *in
+ if in.Gardener != nil {
+ in, out := &in.Gardener, &out.Gardener
+ *out = make([]GardenerResourceData, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Extensions != nil {
+ in, out := &in.Extensions, &out.Extensions
+ *out = make([]ExtensionResourceState, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]ResourceData, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStateSpec.
+func (in *ShootStateSpec) DeepCopy() *ShootStateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootStateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootStatus) DeepCopyInto(out *ShootStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Constraints != nil {
+ in, out := &in.Constraints, &out.Constraints
+ *out = make([]Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ out.Gardener = in.Gardener
+ if in.LastOperation != nil {
+ in, out := &in.LastOperation, &out.LastOperation
+ *out = new(LastOperation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastErrors != nil {
+ in, out := &in.LastErrors, &out.LastErrors
+ *out = make([]LastError, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.RetryCycleStartTime != nil {
+ in, out := &in.RetryCycleStartTime, &out.RetryCycleStartTime
+ *out = (*in).DeepCopy()
+ }
+ if in.SeedName != nil {
+ in, out := &in.SeedName, &out.SeedName
+ *out = new(string)
+ **out = **in
+ }
+ if in.ClusterIdentity != nil {
+ in, out := &in.ClusterIdentity, &out.ClusterIdentity
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStatus.
+func (in *ShootStatus) DeepCopy() *ShootStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Toleration) DeepCopyInto(out *Toleration) {
+ *out = *in
+ if in.Value != nil {
+ in, out := &in.Value, &out.Value
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration.
+func (in *Toleration) DeepCopy() *Toleration {
+ if in == nil {
+ return nil
+ }
+ out := new(Toleration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VerticalPodAutoscaler) DeepCopyInto(out *VerticalPodAutoscaler) {
+ *out = *in
+ if in.EvictAfterOOMThreshold != nil {
+ in, out := &in.EvictAfterOOMThreshold, &out.EvictAfterOOMThreshold
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.EvictionRateBurst != nil {
+ in, out := &in.EvictionRateBurst, &out.EvictionRateBurst
+ *out = new(int32)
+ **out = **in
+ }
+ if in.EvictionRateLimit != nil {
+ in, out := &in.EvictionRateLimit, &out.EvictionRateLimit
+ *out = new(float64)
+ **out = **in
+ }
+ if in.EvictionTolerance != nil {
+ in, out := &in.EvictionTolerance, &out.EvictionTolerance
+ *out = new(float64)
+ **out = **in
+ }
+ if in.RecommendationMarginFraction != nil {
+ in, out := &in.RecommendationMarginFraction, &out.RecommendationMarginFraction
+ *out = new(float64)
+ **out = **in
+ }
+ if in.UpdaterInterval != nil {
+ in, out := &in.UpdaterInterval, &out.UpdaterInterval
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.RecommenderInterval != nil {
+ in, out := &in.RecommenderInterval, &out.RecommenderInterval
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscaler.
+func (in *VerticalPodAutoscaler) DeepCopy() *VerticalPodAutoscaler {
+ if in == nil {
+ return nil
+ }
+ out := new(VerticalPodAutoscaler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Volume) DeepCopyInto(out *Volume) {
+ *out = *in
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ if in.Encrypted != nil {
+ in, out := &in.Encrypted, &out.Encrypted
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume.
+func (in *Volume) DeepCopy() *Volume {
+ if in == nil {
+ return nil
+ }
+ out := new(Volume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeType) DeepCopyInto(out *VolumeType) {
+ *out = *in
+ if in.Usable != nil {
+ in, out := &in.Usable, &out.Usable
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeType.
+func (in *VolumeType) DeepCopy() *VolumeType {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeType)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WatchCacheSizes) DeepCopyInto(out *WatchCacheSizes) {
+ *out = *in
+ if in.Default != nil {
+ in, out := &in.Default, &out.Default
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]ResourceWatchCacheSize, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchCacheSizes.
+func (in *WatchCacheSizes) DeepCopy() *WatchCacheSizes {
+ if in == nil {
+ return nil
+ }
+ out := new(WatchCacheSizes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Worker) DeepCopyInto(out *Worker) {
+ *out = *in
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.CABundle != nil {
+ in, out := &in.CABundle, &out.CABundle
+ *out = new(string)
+ **out = **in
+ }
+ if in.CRI != nil {
+ in, out := &in.CRI, &out.CRI
+ *out = new(CRI)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Kubernetes != nil {
+ in, out := &in.Kubernetes, &out.Kubernetes
+ *out = new(WorkerKubernetes)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ in.Machine.DeepCopyInto(&out.Machine)
+ if in.MaxSurge != nil {
+ in, out := &in.MaxSurge, &out.MaxSurge
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SystemComponents != nil {
+ in, out := &in.SystemComponents, &out.SystemComponents
+ *out = new(WorkerSystemComponents)
+ **out = **in
+ }
+ if in.Taints != nil {
+ in, out := &in.Taints, &out.Taints
+ *out = make([]v1.Taint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Volume != nil {
+ in, out := &in.Volume, &out.Volume
+ *out = new(Volume)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DataVolumes != nil {
+ in, out := &in.DataVolumes, &out.DataVolumes
+ *out = make([]DataVolume, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.KubeletDataVolumeName != nil {
+ in, out := &in.KubeletDataVolumeName, &out.KubeletDataVolumeName
+ *out = new(string)
+ **out = **in
+ }
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.MachineControllerManagerSettings != nil {
+ in, out := &in.MachineControllerManagerSettings, &out.MachineControllerManagerSettings
+ *out = new(MachineControllerManagerSettings)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Worker.
+func (in *Worker) DeepCopy() *Worker {
+ if in == nil {
+ return nil
+ }
+ out := new(Worker)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkerKubernetes) DeepCopyInto(out *WorkerKubernetes) {
+ *out = *in
+ if in.Kubelet != nil {
+ in, out := &in.Kubelet, &out.Kubelet
+ *out = new(KubeletConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerKubernetes.
+func (in *WorkerKubernetes) DeepCopy() *WorkerKubernetes {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkerKubernetes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkerSystemComponents) DeepCopyInto(out *WorkerSystemComponents) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSystemComponents.
+func (in *WorkerSystemComponents) DeepCopy() *WorkerSystemComponents {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkerSystemComponents)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/register.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/register.go
new file mode 100644
index 0000000..c074510
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/register.go
@@ -0,0 +1,19 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package extensions
+
+const (
+ GroupName = "extensions.gardener.cloud"
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/doc.go
new file mode 100644
index 0000000..eceb2a6
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/doc.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +k8s:deepcopy-gen=package
+
+//go:generate gen-crd-api-reference-docs -api-dir . -config ../../../../hack/api-reference/extensions-config.json -template-dir ../../../../hack/api-reference/template -out-file ../../../../hack/api-reference/extensions.md
+
+// Package v1alpha1 is the v1alpha1 version of the API.
+// +groupName=extensions.gardener.cloud
+package v1alpha1
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/register.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/register.go
new file mode 100644
index 0000000..32e4f7d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/register.go
@@ -0,0 +1,69 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ "github.com/gardener/gardener/pkg/apis/extensions"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: extensions.GroupName, Version: "v1alpha1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is used to register the known extension types into a scheme.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // AddToScheme adds the known extension types to a given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &BackupBucket{},
+ &BackupBucketList{},
+ &BackupEntry{},
+ &BackupEntryList{},
+ &Cluster{},
+ &ClusterList{},
+ &ContainerRuntime{},
+ &ContainerRuntimeList{},
+ &ControlPlane{},
+ &ControlPlaneList{},
+ &Extension{},
+ &ExtensionList{},
+ &Infrastructure{},
+ &InfrastructureList{},
+ &Network{},
+ &NetworkList{},
+ &OperatingSystemConfig{},
+ &OperatingSystemConfigList{},
+ &Worker{},
+ &WorkerList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
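The registration above follows the standard apimachinery scheme-builder pattern. As an illustrative sketch (not part of the vendored file), a consumer of this package would typically create a runtime.Scheme and register the extension types via AddToScheme:

    package main

    import (
        extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
        "k8s.io/apimachinery/pkg/runtime"
    )

    // newExtensionsScheme builds a scheme that knows the
    // extensions.gardener.cloud/v1alpha1 kinds registered by addKnownTypes
    // (BackupBucket, Cluster, Extension, Worker, ...). Illustrative only.
    func newExtensionsScheme() (*runtime.Scheme, error) {
        scheme := runtime.NewScheme()
        if err := extensionsv1alpha1.AddToScheme(scheme); err != nil {
            return nil, err
        }
        return scheme, nil
    }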
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types.go
new file mode 100644
index 0000000..009e8ac
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types.go
@@ -0,0 +1,87 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+
+ dnsv1alpha1 "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// Status is the status of an Object.
+type Status interface {
+ // GetProviderStatus retrieves the provider status.
+ GetProviderStatus() *runtime.RawExtension
+ // GetConditions retrieves the Conditions of a status.
+ // Conditions may be nil.
+ GetConditions() []gardencorev1beta1.Condition
+ // SetConditions sets the Conditions of a status.
+ SetConditions([]gardencorev1beta1.Condition)
+ // GetLastOperation retrieves the LastOperation of a status.
+ // LastOperation may be nil.
+ GetLastOperation() *gardencorev1beta1.LastOperation
+ // GetObservedGeneration retrieves the last generation observed by the extension controller.
+ GetObservedGeneration() int64
+ // GetLastError retrieves the LastError of a status.
+ // LastError may be nil.
+ GetLastError() *gardencorev1beta1.LastError
+ // GetState retrieves the State of the extension
+ GetState() *runtime.RawExtension
+ // SetState sets the State of the extension
+ SetState(state *runtime.RawExtension)
+ // GetResources retrieves the list of named resource references referred to in the State by their names.
+ GetResources() []gardencorev1beta1.NamedResourceReference
+ // SetResources sets a list of named resource references in the Status, that are referred by
+ // their names in the State.
+ SetResources(namedResourceReferences []gardencorev1beta1.NamedResourceReference)
+}
+
+// Spec is the spec section of an Object.
+type Spec interface {
+ // GetExtensionType retrieves the extension type.
+ GetExtensionType() string
+ // GetExtensionPurpose retrieves the extension purpose.
+ GetExtensionPurpose() *string
+ // GetProviderConfig retrieves the provider config.
+ GetProviderConfig() *runtime.RawExtension
+}
+
+// Object is an extension object resource.
+type Object interface {
+ metav1.Object
+ runtime.Object
+
+ // GetExtensionSpec retrieves the object's spec.
+ GetExtensionSpec() Spec
+ // GetExtensionStatus retrieves the object's status.
+ GetExtensionStatus() Status
+}
+
+// ExtensionKinds contains all supported extension kinds.
+var ExtensionKinds = sets.NewString(
+ BackupBucketResource,
+ BackupEntryResource,
+ ContainerRuntimeResource,
+ ControlPlaneResource,
+ dnsv1alpha1.DNSProviderKind,
+ ExtensionResource,
+ InfrastructureResource,
+ NetworkResource,
+ OperatingSystemConfigResource,
+ WorkerResource,
+)
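The Object, Spec, and Status interfaces above let controllers handle all extension kinds generically. A minimal sketch of such generic access (the helper name describeExtensionObject is hypothetical, not part of this package):

    package main

    import (
        "fmt"

        extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
    )

    // describeExtensionObject reads common spec/status fields through the
    // Object, Spec, and Status interfaces without knowing the concrete kind.
    // Hypothetical helper for illustration only.
    func describeExtensionObject(obj extensionsv1alpha1.Object) string {
        spec, status := obj.GetExtensionSpec(), obj.GetExtensionStatus()
        msg := fmt.Sprintf("%s: type=%s observedGeneration=%d",
            obj.GetName(), spec.GetExtensionType(), status.GetObservedGeneration())
        if op := status.GetLastOperation(); op != nil {
            msg += fmt.Sprintf(", lastOperation=%s (%d%%)", op.Type, op.Progress)
        }
        return msg
    }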
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupbucket.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupbucket.go
new file mode 100644
index 0000000..d5ddb39
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupbucket.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var _ Object = (*BackupBucket)(nil)
+
+// BackupBucketResource is a constant for the name of the BackupBucket resource.
+const BackupBucketResource = "BackupBucket"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupBucket is a specification for a backup bucket.
+type BackupBucket struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec BackupBucketSpec `json:"spec"`
+ Status BackupBucketStatus `json:"status"`
+}
+
+// GetExtensionSpec implements Object.
+func (i *BackupBucket) GetExtensionSpec() Spec {
+ return &i.Spec
+}
+
+// GetExtensionStatus implements Object.
+func (i *BackupBucket) GetExtensionStatus() Status {
+ return &i.Status
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupBucketList is a list of BackupBucket resources.
+type BackupBucketList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of BackupBucket.
+ Items []BackupBucket `json:"items"`
+}
+
+// BackupBucketSpec is the spec for a BackupBucket resource.
+type BackupBucketSpec struct {
+ // DefaultSpec is a structure containing common fields used by all extension resources.
+ DefaultSpec `json:",inline"`
+ // Region is the region of this bucket.
+ Region string `json:"region"`
+ // SecretRef is a reference to a secret that contains the credentials to access object store.
+ SecretRef corev1.SecretReference `json:"secretRef"`
+}
+
+// BackupBucketStatus is the status for a BackupBucket resource.
+type BackupBucketStatus struct {
+ // DefaultStatus is a structure containing common fields used by all extension resources.
+ DefaultStatus `json:",inline"`
+ // GeneratedSecretRef is a reference to the secret generated by the backup bucket, which
+ // will have object store specific credentials.
+ // +optional
+ GeneratedSecretRef *corev1.SecretReference `json:"generatedSecretRef,omitempty"`
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupentry.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupentry.go
new file mode 100644
index 0000000..146ed60
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupentry.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+var _ Object = (*BackupEntry)(nil)
+
+// BackupEntryResource is a constant for the name of the BackupEntry resource.
+const BackupEntryResource = "BackupEntry"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupEntry is a specification for a backup entry.
+type BackupEntry struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec BackupEntrySpec `json:"spec"`
+ Status BackupEntryStatus `json:"status"`
+}
+
+// GetExtensionSpec implements Object.
+func (i *BackupEntry) GetExtensionSpec() Spec {
+ return &i.Spec
+}
+
+// GetExtensionStatus implements Object.
+func (i *BackupEntry) GetExtensionStatus() Status {
+ return &i.Status
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupEntryList is a list of BackupEntry resources.
+type BackupEntryList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of BackupEntry.
+ Items []BackupEntry `json:"items"`
+}
+
+// BackupEntrySpec is the spec for a BackupEntry resource.
+type BackupEntrySpec struct {
+ // DefaultSpec is a structure containing common fields used by all extension resources.
+ DefaultSpec `json:",inline"`
+ // BackupBucketProviderStatus contains the provider status that has
+ // been generated by the controller responsible for the `BackupBucket` resource.
+ // +optional
+ BackupBucketProviderStatus *runtime.RawExtension `json:"backupBucketProviderStatus,omitempty"`
+ // Region is the region of this Entry.
+ Region string `json:"region"`
+ // BucketName is the name of the backup bucket for this backup entry.
+ BucketName string `json:"bucketName"`
+ // SecretRef is a reference to a secret that contains the credentials to access object store.
+ SecretRef corev1.SecretReference `json:"secretRef"`
+}
+
+// BackupEntryStatus is the status for a BackupEntry resource.
+type BackupEntryStatus struct {
+ // DefaultStatus is a structure containing common fields used by all extension resources.
+ DefaultStatus `json:",inline"`
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_cluster.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_cluster.go
new file mode 100644
index 0000000..ccfda96
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_cluster.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// ClusterResource is a constant for the name of the Cluster resource.
+const ClusterResource = "Cluster"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Cluster is a specification for a Cluster resource.
+type Cluster struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ClusterSpec `json:"spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterList is a list of Cluster resources.
+type ClusterList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is the list of Cluster.
+ Items []Cluster `json:"items"`
+}
+
+// ClusterSpec is the spec for a Cluster resource.
+type ClusterSpec struct {
+ // CloudProfile is a raw extension field that contains the cloudprofile resource referenced
+ // by the shoot that has to be reconciled.
+ CloudProfile runtime.RawExtension `json:"cloudProfile"`
+ // Seed is a raw extension field that contains the seed resource referenced by the shoot that
+ // has to be reconciled.
+ Seed runtime.RawExtension `json:"seed"`
+ // Shoot is a raw extension field that contains the shoot resource that has to be reconciled.
+ Shoot runtime.RawExtension `json:"shoot"`
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_containerruntime.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_containerruntime.go
new file mode 100644
index 0000000..a911a15
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_containerruntime.go
@@ -0,0 +1,87 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var _ Object = (*ContainerRuntime)(nil)
+
+const (
+ // ContainerRuntimeResource is a constant for the name of the Container Runtime Extension resource.
+ ContainerRuntimeResource = "ContainerRuntime"
+ // CRINameWorkerLabel is the name of the label describing the CRI name used in this node.
+ CRINameWorkerLabel = "worker.gardener.cloud/cri-name"
+ // ContainerRuntimeNameWorkerLabel is a label describing a Container Runtime which should be supported on the node.
+ ContainerRuntimeNameWorkerLabel = "containerruntime.worker.gardener.cloud/%s"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ContainerRuntime is a specification for a container runtime resource.
+type ContainerRuntime struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ Spec ContainerRuntimeSpec `json:"spec"`
+ Status ContainerRuntimeStatus `json:"status"`
+}
+
+// GetExtensionSpec implements Object.
+func (i *ContainerRuntime) GetExtensionSpec() Spec {
+ return &i.Spec
+}
+
+// GetExtensionStatus implements Object.
+func (i *ContainerRuntime) GetExtensionStatus() Status {
+ return &i.Status
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ContainerRuntimeList is a list of ContainerRuntime resources.
+type ContainerRuntimeList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []ContainerRuntime `json:"items"`
+}
+
+// ContainerRuntimeSpec is the spec for a ContainerRuntime resource.
+type ContainerRuntimeSpec struct {
+ // BinaryPath is the Worker's machine path where container runtime extensions should copy the binaries to.
+ BinaryPath string `json:"binaryPath"`
+ // WorkerPool identifies the worker pool of the Shoot.
+ // For each worker pool and type, Gardener deploys a ContainerRuntime CRD.
+ WorkerPool ContainerRuntimeWorkerPool `json:"workerPool"`
+ // DefaultSpec is a structure containing common fields used by all extension resources.
+ DefaultSpec `json:",inline"`
+}
+
+// ContainerRuntimeWorkerPool identifies the worker pool of the Shoot for which the container runtime should be made available.
+type ContainerRuntimeWorkerPool struct {
+ // Name specifies the name of the worker pool the container runtime should be available for.
+ Name string `json:"name"`
+ // Selector is the label selector used by the extension to match the nodes belonging to the worker pool.
+ Selector metav1.LabelSelector `json:"selector"`
+}
+
+// ContainerRuntimeStatus is the status for a ContainerRuntime resource.
+type ContainerRuntimeStatus struct {
+ // DefaultStatus is a structure containing common fields used by all extension resources.
+ DefaultStatus `json:",inline"`
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_controlplane.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_controlplane.go
new file mode 100644
index 0000000..563edba
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_controlplane.go
@@ -0,0 +1,97 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+var _ Object = (*ControlPlane)(nil)
+
+// ControlPlaneResource is a constant for the name of the ControlPlane resource.
+const ControlPlaneResource = "ControlPlane"
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControlPlane is a specification for a ControlPlane resource.
+type ControlPlane struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ControlPlaneSpec `json:"spec"`
+ Status ControlPlaneStatus `json:"status"`
+}
+
+// GetExtensionSpec implements Object.
+func (i *ControlPlane) GetExtensionSpec() Spec {
+ return &i.Spec
+}
+
+// GetExtensionStatus implements Object.
+func (i *ControlPlane) GetExtensionStatus() Status {
+ return &i.Status
+}
+
+// GetExtensionPurpose implements Object.
+func (i *ControlPlaneSpec) GetExtensionPurpose() *string {
+ return (*string)(i.Purpose)
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControlPlaneList is a list of ControlPlane resources.
+type ControlPlaneList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is the list of ControlPlanes.
+ Items []ControlPlane `json:"items"`
+}
+
+// ControlPlaneSpec is the spec of a ControlPlane resource.
+type ControlPlaneSpec struct {
+ // DefaultSpec is a structure containing common fields used by all extension resources.
+ DefaultSpec `json:",inline"`
+ // Purpose contains the data if a cloud provider needs additional components in order to expose the control plane.
+ // +optional
+ Purpose *Purpose `json:"purpose,omitempty"`
+ // InfrastructureProviderStatus contains the provider status that has
+ // been generated by the controller responsible for the `Infrastructure` resource.
+ // +optional
+ InfrastructureProviderStatus *runtime.RawExtension `json:"infrastructureProviderStatus,omitempty"`
+ // Region is the region of this control plane.
+ Region string `json:"region"`
+ // SecretRef is a reference to a secret that contains the cloud provider specific credentials.
+ SecretRef corev1.SecretReference `json:"secretRef"`
+}
+
+// ControlPlaneStatus is the status of a ControlPlane resource.
+type ControlPlaneStatus struct {
+ // DefaultStatus is a structure containing common fields used by all extension resources.
+ DefaultStatus `json:",inline"`
+}
+
+// Purpose is a string alias.
+type Purpose string
+
+const (
+ // Normal triggers the ControlPlane controllers for the shoot provider.
+ Normal Purpose = "normal"
+ // Exposure triggers the ControlPlane controllers for the exposure settings.
+ Exposure Purpose = "exposure"
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_defaults.go
new file mode 100644
index 0000000..619d479
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_defaults.go
@@ -0,0 +1,119 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DefaultSpec contains common spec fields for every extension resource.
+type DefaultSpec struct {
+ // Type contains the instance of the resource's kind.
+ Type string `json:"type"`
+ // ProviderConfig is the provider specific configuration.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty"`
+}
+
+// GetExtensionType implements Spec.
+func (d *DefaultSpec) GetExtensionType() string {
+ return d.Type
+}
+
+// GetExtensionPurpose implements Spec.
+func (d *DefaultSpec) GetExtensionPurpose() *string {
+ return nil
+}
+
+// GetProviderConfig implements Spec.
+func (d *DefaultSpec) GetProviderConfig() *runtime.RawExtension {
+ return d.ProviderConfig
+}
+
+// DefaultStatus contains common status fields for every extension resource.
+type DefaultStatus struct {
+ // ProviderStatus contains provider-specific status.
+ // +optional
+ ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"`
+ // Conditions represents the latest available observations of a Seed's current state.
+ // +optional
+ Conditions []gardencorev1beta1.Condition `json:"conditions,omitempty"`
+ // LastError holds information about the last occurred error during an operation.
+ // +optional
+ LastError *gardencorev1beta1.LastError `json:"lastError,omitempty"`
+ // LastOperation holds information about the last operation on the resource.
+ // +optional
+ LastOperation *gardencorev1beta1.LastOperation `json:"lastOperation,omitempty"`
+ // ObservedGeneration is the most recent generation observed for this resource.
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+ // State can be filled by the operating controller with whatever data it needs.
+ // +optional
+ State *runtime.RawExtension `json:"state,omitempty"`
+ // Resources holds a list of named resource references that can be referred to in the state by their names.
+ // +optional
+ Resources []gardencorev1beta1.NamedResourceReference `json:"resources,omitempty"`
+}
+
+// GetProviderStatus implements Status.
+func (d *DefaultStatus) GetProviderStatus() *runtime.RawExtension {
+ return d.ProviderStatus
+}
+
+// GetConditions implements Status.
+func (d *DefaultStatus) GetConditions() []gardencorev1beta1.Condition {
+ return d.Conditions
+}
+
+// SetConditions implements Status.
+func (d *DefaultStatus) SetConditions(c []gardencorev1beta1.Condition) {
+ d.Conditions = c
+}
+
+// GetLastOperation implements Status.
+func (d *DefaultStatus) GetLastOperation() *gardencorev1beta1.LastOperation {
+ return d.LastOperation
+}
+
+// GetLastError implements Status.
+func (d *DefaultStatus) GetLastError() *gardencorev1beta1.LastError {
+ return d.LastError
+}
+
+// GetObservedGeneration implements Status.
+func (d *DefaultStatus) GetObservedGeneration() int64 {
+ return d.ObservedGeneration
+}
+
+// GetState implements Status.
+func (d *DefaultStatus) GetState() *runtime.RawExtension {
+ return d.State
+}
+
+// SetState implements Status.
+func (d *DefaultStatus) SetState(state *runtime.RawExtension) {
+ d.State = state
+}
+
+// GetResources implements Status.
+func (d *DefaultStatus) GetResources() []gardencorev1beta1.NamedResourceReference {
+ return d.Resources
+}
+
+// SetResources implements Status.
+func (d *DefaultStatus) SetResources(namedResourceReference []gardencorev1beta1.NamedResourceReference) {
+ d.Resources = namedResourceReference
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_extension.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_extension.go
new file mode 100644
index 0000000..94953e4
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_extension.go
@@ -0,0 +1,70 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var _ Object = (*Extension)(nil)
+
+// ExtensionResource is a constant for the name of the Extension resource.
+const ExtensionResource = "Extension"
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Extension is a specification for an Extension resource.
+type Extension struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ExtensionSpec `json:"spec"`
+ Status ExtensionStatus `json:"status"`
+}
+
+// GetExtensionSpec implements Object.
+func (i *Extension) GetExtensionSpec() Spec {
+ return &i.Spec
+}
+
+// GetExtensionStatus implements Object.
+func (i *Extension) GetExtensionStatus() Status {
+ return &i.Status
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExtensionList is a list of Extension resources.
+type ExtensionList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []Extension `json:"items"`
+}
+
+// ExtensionSpec is the spec for an Extension resource.
+type ExtensionSpec struct {
+ // DefaultSpec is a structure containing common fields used by all extension resources.
+ DefaultSpec `json:",inline"`
+}
+
+// ExtensionStatus is the status for an Extension resource.
+type ExtensionStatus struct {
+ // DefaultStatus is a structure containing common fields used by all extension resources.
+ DefaultStatus `json:",inline"`
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_infrastructure.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_infrastructure.go
new file mode 100644
index 0000000..7e514a3
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_infrastructure.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var _ Object = (*Infrastructure)(nil)
+
+// InfrastructureResource is a constant for the name of the Infrastructure resource.
+const InfrastructureResource = "Infrastructure"
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Infrastructure is a specification for cloud provider infrastructure.
+type Infrastructure struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec InfrastructureSpec `json:"spec"`
+ Status InfrastructureStatus `json:"status"`
+}
+
+// GetExtensionSpec implements Object.
+func (i *Infrastructure) GetExtensionSpec() Spec {
+ return &i.Spec
+}
+
+// GetExtensionStatus implements Object.
+func (i *Infrastructure) GetExtensionStatus() Status {
+ return &i.Status
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InfrastructureList is a list of Infrastructure resources.
+type InfrastructureList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of Infrastructures.
+ Items []Infrastructure `json:"items"`
+}
+
+// InfrastructureSpec is the spec for an Infrastructure resource.
+type InfrastructureSpec struct {
+ // DefaultSpec is a structure containing common fields used by all extension resources.
+ DefaultSpec `json:",inline"`
+ // Region is the region of this infrastructure.
+ Region string `json:"region"`
+ // SecretRef is a reference to a secret that contains the cloud provider specific credentials.
+ SecretRef corev1.SecretReference `json:"secretRef"`
+ // SSHPublicKey is the public SSH key that should be used with this infrastructure.
+ // +optional
+ SSHPublicKey []byte `json:"sshPublicKey,omitempty"`
+}
+
+// InfrastructureStatus is the status for an Infrastructure resource.
+type InfrastructureStatus struct {
+ // DefaultStatus is a structure containing common fields used by all extension resources.
+ DefaultStatus `json:",inline"`
+ // NodesCIDR is the CIDR of the node network that was optionally created by the acting extension controller.
+ // This might be needed in environments in which the CIDR for the network for the shoot worker node cannot
+ // be statically defined in the Shoot resource but must be computed dynamically.
+ // +optional
+ NodesCIDR *string `json:"nodesCIDR,omitempty"`
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_network.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_network.go
new file mode 100644
index 0000000..98f4347
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_network.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var _ Object = (*Network)(nil)
+
+// NetworkResource is a constant for the name of the Network resource.
+const NetworkResource = "Network"
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Network is the specification for cluster networking.
+type Network struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec NetworkSpec `json:"spec"`
+ Status NetworkStatus `json:"status"`
+}
+
+// GetExtensionSpec implements Object.
+func (n *Network) GetExtensionSpec() Spec {
+ return &n.Spec
+}
+
+// GetExtensionStatus implements Object.
+func (n *Network) GetExtensionStatus() Status {
+ return &n.Status
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NetworkList is a list of Network resources.
+type NetworkList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of Networks.
+ Items []Network `json:"items"`
+}
+
+// NetworkSpec is the spec for a Network resource.
+type NetworkSpec struct {
+ // DefaultSpec is a structure containing common fields used by all extension resources.
+ DefaultSpec `json:",inline"`
+ // PodCIDR defines the CIDR that will be used for pods.
+ PodCIDR string `json:"podCIDR"`
+ // ServiceCIDR defines the CIDR that will be used for services.
+ ServiceCIDR string `json:"serviceCIDR"`
+}
+
+// NetworkStatus is the status for a Network resource.
+type NetworkStatus struct {
+ // DefaultStatus is a structure containing common fields used by all extension resources.
+ DefaultStatus `json:",inline"`
+}
+
+// GetExtensionType returns the type of this Network resource.
+func (n *Network) GetExtensionType() string {
+ return n.Spec.Type
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go
new file mode 100644
index 0000000..ecbda57
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go
@@ -0,0 +1,223 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var _ Object = (*OperatingSystemConfig)(nil)
+
+// OperatingSystemConfigResource is a constant for the name of the OperatingSystemConfig resource.
+const OperatingSystemConfigResource = "OperatingSystemConfig"
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OperatingSystemConfig is a specification for an OperatingSystemConfig resource.
+type OperatingSystemConfig struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec OperatingSystemConfigSpec `json:"spec"`
+ Status OperatingSystemConfigStatus `json:"status"`
+}
+
+// GetExtensionSpec implements Object.
+func (o *OperatingSystemConfig) GetExtensionSpec() Spec {
+ return &o.Spec
+}
+
+// GetExtensionPurpose implements Object.
+func (o *OperatingSystemConfigSpec) GetExtensionPurpose() *string {
+ return (*string)(&o.Purpose)
+}
+
+// GetExtensionStatus implements Object.
+func (o *OperatingSystemConfig) GetExtensionStatus() Status {
+ return &o.Status
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OperatingSystemConfigList is a list of OperatingSystemConfig resources.
+type OperatingSystemConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of OperatingSystemConfigs.
+ Items []OperatingSystemConfig `json:"items"`
+}
+
+// OperatingSystemConfigSpec is the spec for an OperatingSystemConfig resource.
+type OperatingSystemConfigSpec struct {
+ // CRIConfig is a structure that contains configurations of the CRI library.
+ // +optional
+ CRIConfig *CRIConfig `json:"criConfig,omitempty"`
+ // DefaultSpec is a structure containing common fields used by all extension resources.
+ DefaultSpec `json:",inline"`
+ // Purpose describes how the result of this OperatingSystemConfig is used by Gardener. Either it
+ // gets sent to the `Worker` extension controller to bootstrap a VM, or it is downloaded by the
+ // cloud-config-downloader script already running on a bootstrapped VM.
+ Purpose OperatingSystemConfigPurpose `json:"purpose"`
+ // ReloadConfigFilePath is the path to the generated operating system configuration. If set, controllers
+ // are asked to use it when determining the .status.command of this resource. For example, for CoreOS
+ // the reload path might be "/var/lib/config"; the controller would then set .status.command to
+ // "/usr/bin/coreos-cloudinit --from-file=/var/lib/config".
+ // +optional
+ ReloadConfigFilePath *string `json:"reloadConfigFilePath,omitempty"`
+ // Units is a list of units for the operating system configuration (usually, systemd units).
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ Units []Unit `json:"units,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+ // Files is a list of files that should get written to the host's file system.
+ // +patchMergeKey=path
+ // +patchStrategy=merge
+ // +optional
+ Files []File `json:"files,omitempty" patchStrategy:"merge" patchMergeKey:"path"`
+}
+
+// Unit is a unit for the operating system configuration (usually, a systemd unit).
+type Unit struct {
+ // Name is the name of a unit.
+ Name string `json:"name"`
+ // Command is the unit's command.
+ // +optional
+ Command *string `json:"command,omitempty"`
+ // Enable describes whether the unit is enabled or not.
+ // +optional
+ Enable *bool `json:"enable,omitempty"`
+ // Content is the unit's content.
+ // +optional
+ Content *string `json:"content,omitempty"`
+ // DropIns is a list of drop-ins for this unit.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +optional
+ DropIns []DropIn `json:"dropIns,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+}
+
+// DropIn is a drop-in configuration for a systemd unit.
+type DropIn struct {
+ // Name is the name of the drop-in.
+ Name string `json:"name"`
+ // Content is the content of the drop-in.
+ Content string `json:"content"`
+}
+
+// File is a file that should get written to the host's file system. The content can either be inlined or
+// referenced from a secret in the same namespace.
+type File struct {
+ // Path is the path of the file system where the file should get written to.
+ Path string `json:"path"`
+ // Permissions describes with which permissions the file should get written to the file system.
+ // Should be defaulted to octal 0644.
+ // +optional
+ Permissions *int32 `json:"permissions,omitempty"`
+ // Content describes the file's content.
+ Content FileContent `json:"content"`
+}
+
+// FileContent can either reference a secret or contain inline configuration.
+type FileContent struct {
+ // SecretRef is a struct that contains information about the referenced secret.
+ // +optional
+ SecretRef *FileContentSecretRef `json:"secretRef,omitempty"`
+ // Inline is a struct that contains information about the inlined data.
+ // +optional
+ Inline *FileContentInline `json:"inline,omitempty"`
+}
+
+// FileContentSecretRef contains keys for referencing a file content's data from a secret in the same namespace.
+type FileContentSecretRef struct {
+ // Name is the name of the secret.
+ Name string `json:"name"`
+ // DataKey is the key in the secret's `.data` field that should be read.
+ DataKey string `json:"dataKey"`
+}
+
+// FileContentInline contains keys for inlining a file content's data and encoding.
+type FileContentInline struct {
+ // Encoding is the file's encoding (e.g. base64).
+ Encoding string `json:"encoding"`
+ // Data is the file's data.
+ Data string `json:"data"`
+}
+
+// OperatingSystemConfigStatus is the status for an OperatingSystemConfig resource.
+type OperatingSystemConfigStatus struct {
+ // DefaultStatus is a structure containing common fields used by all extension resources.
+ DefaultStatus `json:",inline"`
+ // CloudConfig is a structure for containing the generated output for the given operating system
+ // config spec. It contains a reference to a secret as the result may contain confidential data.
+ // +optional
+ CloudConfig *CloudConfig `json:"cloudConfig,omitempty"`
+ // Command is the command whose execution renews/reloads the cloud config on an existing VM, e.g.
+ // "/usr/bin/reload-cloud-config -from-file=". The is optionally provided by Gardener
+ // in the .spec.reloadConfigFilePath field.
+ // +optional
+ Command *string `json:"command,omitempty"`
+ // Units is a list of systemd unit names that are part of the generated Cloud Config and shall be
+ // restarted when a new version has been downloaded.
+ // +optional
+ Units []string `json:"units,omitempty"`
+}
+
+// CloudConfig is a structure for containing the generated output for the given operating system
+// config spec. It contains a reference to a secret as the result may contain confidential data.
+type CloudConfig struct {
+ // SecretRef is a reference to a secret that contains the actual result of the generated cloud config.
+ SecretRef corev1.SecretReference `json:"secretRef"`
+}
+
+// OperatingSystemConfigPurpose is a string alias.
+type OperatingSystemConfigPurpose string
+
+const (
+ // OperatingSystemConfigPurposeProvision describes that the operating system configuration is used to bootstrap a
+ // new VM.
+ OperatingSystemConfigPurposeProvision OperatingSystemConfigPurpose = "provision"
+ // OperatingSystemConfigPurposeReconcile describes that the operating system configuration is executed on an already
+ // provisioned VM by the cloud-config-downloader script.
+ OperatingSystemConfigPurposeReconcile OperatingSystemConfigPurpose = "reconcile"
+
+ // OperatingSystemConfigDefaultFilePermission is the default value for a permission of a file.
+ OperatingSystemConfigDefaultFilePermission int32 = 0644
+ // OperatingSystemConfigSecretDataKey is a constant for the key in a secret's `.data` field containing the
+ // results of a computed cloud config.
+ OperatingSystemConfigSecretDataKey = "cloud_config"
+)
+
+// CRIConfig is a structure that contains configurations of the CRI library.
+type CRIConfig struct {
+ // Name is a mandatory string containing the name of the CRI library.
+ Name CRIName `json:"name"`
+}
+
+// CRIName is a type alias for the CRI name string.
+type CRIName string
+
+const (
+ // CRINameContainerD is a constant for ContainerD CRI name
+ CRINameContainerD = "containerd"
+)
+
+// ContainerDRuntimeContainersBinFolder is the folder where Container Runtime binaries should be saved for ContainerD usage
+const ContainerDRuntimeContainersBinFolder = "/var/bin/containerruntimes"
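As a sketch of how the file-related types above compose (the path, data, and the "b64" encoding label are placeholder values, not mandated by this package):

    package main

    import (
        extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
    )

    // exampleFile builds a File entry with inline, base64-encoded content and
    // the default permissions. Path, data, and encoding are placeholder values.
    func exampleFile() extensionsv1alpha1.File {
        permissions := extensionsv1alpha1.OperatingSystemConfigDefaultFilePermission
        return extensionsv1alpha1.File{
            Path:        "/etc/example.conf",
            Permissions: &permissions,
            Content: extensionsv1alpha1.FileContent{
                Inline: &extensionsv1alpha1.FileContentInline{
                    Encoding: "b64",
                    Data:     "aGVsbG8gd29ybGQ=", // "hello world"
                },
            },
        }
    }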
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_worker.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_worker.go
new file mode 100644
index 0000000..bcd5595
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_worker.go
@@ -0,0 +1,199 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+var _ Object = (*Worker)(nil)
+
+// WorkerResource is a constant for the name of the Worker resource.
+const WorkerResource = "Worker"
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Worker is a specification for a Worker resource.
+type Worker struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec WorkerSpec `json:"spec"`
+ Status WorkerStatus `json:"status"`
+}
+
+// GetExtensionSpec implements Object.
+func (i *Worker) GetExtensionSpec() Spec {
+ return &i.Spec
+}
+
+// GetExtensionStatus implements Object.
+func (i *Worker) GetExtensionStatus() Status {
+ return &i.Status
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// WorkerList is a list of Worker resources.
+type WorkerList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of Worker.
+ Items []Worker `json:"items"`
+}
+
+// WorkerSpec is the spec for a Worker resource.
+type WorkerSpec struct {
+ // DefaultSpec is a structure containing common fields used by all extension resources.
+ DefaultSpec `json:",inline"`
+
+ // InfrastructureProviderStatus is a raw extension field that contains the provider status that has
+ // been generated by the controller responsible for the `Infrastructure` resource.
+ // +optional
+ InfrastructureProviderStatus *runtime.RawExtension `json:"infrastructureProviderStatus,omitempty"`
+ // Region is the name of the region where the worker pool should be deployed to.
+ Region string `json:"region"`
+ // SecretRef is a reference to a secret that contains the cloud provider specific credentials.
+ SecretRef corev1.SecretReference `json:"secretRef"`
+ // SSHPublicKey is the public SSH key that should be used with these workers.
+ // +optional
+ SSHPublicKey []byte `json:"sshPublicKey,omitempty"`
+ // Pools is a list of worker pools.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Pools []WorkerPool `json:"pools" patchStrategy:"merge" patchMergeKey:"name"`
+}
+
+// WorkerPool is the definition of a specific worker pool.
+type WorkerPool struct {
+ // MachineType contains information about the machine type that should be used for this worker pool.
+ MachineType string `json:"machineType"`
+ // Maximum is the maximum size of the worker pool.
+ Maximum int32 `json:"maximum"`
+ // MaxSurge is the maximum number of VMs that are created during an update.
+ MaxSurge intstr.IntOrString `json:"maxSurge"`
+ // MaxUnavailable is the maximum number of VMs that can be unavailable during an update.
+ MaxUnavailable intstr.IntOrString `json:"maxUnavailable"`
+ // Annotations is a map of key/value pairs for annotations for all the `Node` objects in this worker pool.
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty"`
+ // Labels is a map of key/value pairs for labels for all the `Node` objects in this worker pool.
+ // +optional
+ Labels map[string]string `json:"labels,omitempty"`
+ // Taints is a list of taints for all the `Node` objects in this worker pool.
+ // +optional
+ Taints []corev1.Taint `json:"taints,omitempty"`
+ // MachineImage contains logical information about the name and the version of the machine image that
+ // should be used. The logical information must be mapped to the provider-specific information (e.g.,
+ // AMIs, ...) by the provider itself.
+ MachineImage MachineImage `json:"machineImage,omitempty"`
+ // Minimum is the minimum size of the worker pool.
+ Minimum int32 `json:"minimum"`
+ // Name is the name of this worker pool.
+ Name string `json:"name"`
+ // ProviderConfig is a provider specific configuration for the worker pool.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty"`
+ // UserData is a base64-encoded string that contains the data that is sent to the provider's APIs
+ // when a new machine/VM that is part of this worker pool shall be spawned.
+ UserData []byte `json:"userData"`
+ // Volume contains information about the root disks that should be used for this worker pool.
+ // +optional
+ Volume *Volume `json:"volume,omitempty"`
+ // DataVolumes contains a list of additional worker volumes.
+ // +optional
+ DataVolumes []DataVolume `json:"dataVolumes,omitempty"`
+ // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state.
+ // +optional
+ KubeletDataVolumeName *string `json:"kubeletDataVolumeName,omitempty"`
+ // Zones contains information about availability zones for this worker pool.
+ // +optional
+ Zones []string `json:"zones,omitempty"`
+ // MachineControllerManagerSettings contains configurations for different worker pools, e.g. MachineDrainTimeout and MachineHealthTimeout.
+ // +optional
+ MachineControllerManagerSettings *gardencorev1beta1.MachineControllerManagerSettings `json:"machineControllerManager,omitempty"`
+}
+
+// MachineImage contains logical information about the name and the version of the machine image that
+// should be used. The logical information must be mapped to the provider-specific information (e.g.,
+// AMIs, ...) by the provider itself.
+type MachineImage struct {
+ // Name is the logical name of the machine image.
+ Name string `json:"name"`
+ // Version is the version of the machine image.
+ Version string `json:"version"`
+}
+
+// Volume contains information about the root disks that should be used for worker pools.
+type Volume struct {
+ // Name of the volume to make it referenceable.
+ // +optional
+ Name *string `json:"name,omitempty"`
+ // Type is the type of the volume.
+ // +optional
+ Type *string `json:"type,omitempty"`
+ // Size is the size of the root volume.
+ Size string `json:"size"`
+ // Encrypted determines if the volume should be encrypted.
+ // +optional
+ Encrypted *bool `json:"encrypted,omitempty"`
+}
+
+// DataVolume contains information about a data volume.
+type DataVolume struct {
+ // Name of the volume to make it referenceable.
+ Name string `json:"name"`
+ // Type is the type of the volume.
+ // +optional
+ Type *string `json:"type,omitempty"`
+ // Size is the size of the volume.
+ Size string `json:"size"`
+ // Encrypted determines if the volume should be encrypted.
+ // +optional
+ Encrypted *bool `json:"encrypted,omitempty"`
+}
+
+// WorkerStatus is the status for a Worker resource.
+type WorkerStatus struct {
+ // DefaultStatus is a structure containing common fields used by all extension resources.
+ DefaultStatus `json:",inline"`
+ // MachineDeployments is a list of created machine deployments. It is used, for example, to configure
+ // the cluster-autoscaler properly.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ MachineDeployments []MachineDeployment `json:"machineDeployments,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+}
+
+// MachineDeployment is a created machine deployment.
+type MachineDeployment struct {
+ // Name is the name of the `MachineDeployment` resource.
+ Name string `json:"name"`
+ // Minimum is the minimum number for this machine deployment.
+ Minimum int32 `json:"minimum"`
+ // Maximum is the maximum number for this machine deployment.
+ Maximum int32 `json:"maximum"`
+}
+
+// WorkerRollingUpdate is a constant for a condition type indicating a rolling update for any worker pool of the Shoot.
+const WorkerRollingUpdate gardencorev1beta1.ConditionType = "RollingUpdate"
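For orientation, a minimal sketch of how the Worker API types above could be populated from Go code, assuming the vendored import path github.com/gardener/gardener/pkg/apis/extensions/v1alpha1; the machine type, image name, zone, and user data are illustrative placeholders, not values prescribed by the API:

package main

import (
	"fmt"

	extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Hypothetical worker pool; all concrete values are placeholders.
	pool := extensionsv1alpha1.WorkerPool{
		Name:           "worker-pool-1",
		MachineType:    "m5.large",
		Minimum:        1,
		Maximum:        3,
		MaxSurge:       intstr.FromInt(1),
		MaxUnavailable: intstr.FromInt(0),
		MachineImage: extensionsv1alpha1.MachineImage{
			Name:    "gardenlinux",
			Version: "1.0.0",
		},
		UserData: []byte("#cloud-config"),
		Zones:    []string{"zone-a"},
	}
	fmt.Printf("pool %s: %d-%d nodes of type %s\n", pool.Name, pool.Minimum, pool.Maximum, pool.MachineType)
}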
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..51466ea
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,1475 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ v1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucket) DeepCopyInto(out *BackupBucket) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucket.
+func (in *BackupBucket) DeepCopy() *BackupBucket {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucket)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupBucket) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketList) DeepCopyInto(out *BackupBucketList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]BackupBucket, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketList.
+func (in *BackupBucketList) DeepCopy() *BackupBucketList {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupBucketList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketSpec) DeepCopyInto(out *BackupBucketSpec) {
+ *out = *in
+ in.DefaultSpec.DeepCopyInto(&out.DefaultSpec)
+ out.SecretRef = in.SecretRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketSpec.
+func (in *BackupBucketSpec) DeepCopy() *BackupBucketSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketStatus) DeepCopyInto(out *BackupBucketStatus) {
+ *out = *in
+ in.DefaultStatus.DeepCopyInto(&out.DefaultStatus)
+ if in.GeneratedSecretRef != nil {
+ in, out := &in.GeneratedSecretRef, &out.GeneratedSecretRef
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketStatus.
+func (in *BackupBucketStatus) DeepCopy() *BackupBucketStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntry) DeepCopyInto(out *BackupEntry) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntry.
+func (in *BackupEntry) DeepCopy() *BackupEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupEntry) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntryList) DeepCopyInto(out *BackupEntryList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]BackupEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryList.
+func (in *BackupEntryList) DeepCopy() *BackupEntryList {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntryList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupEntryList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntrySpec) DeepCopyInto(out *BackupEntrySpec) {
+ *out = *in
+ in.DefaultSpec.DeepCopyInto(&out.DefaultSpec)
+ if in.BackupBucketProviderStatus != nil {
+ in, out := &in.BackupBucketProviderStatus, &out.BackupBucketProviderStatus
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ out.SecretRef = in.SecretRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntrySpec.
+func (in *BackupEntrySpec) DeepCopy() *BackupEntrySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntrySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntryStatus) DeepCopyInto(out *BackupEntryStatus) {
+ *out = *in
+ in.DefaultStatus.DeepCopyInto(&out.DefaultStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryStatus.
+func (in *BackupEntryStatus) DeepCopy() *BackupEntryStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntryStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CRIConfig) DeepCopyInto(out *CRIConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRIConfig.
+func (in *CRIConfig) DeepCopy() *CRIConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(CRIConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudConfig) DeepCopyInto(out *CloudConfig) {
+ *out = *in
+ out.SecretRef = in.SecretRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudConfig.
+func (in *CloudConfig) DeepCopy() *CloudConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Cluster) DeepCopyInto(out *Cluster) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
+func (in *Cluster) DeepCopy() *Cluster {
+ if in == nil {
+ return nil
+ }
+ out := new(Cluster)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Cluster) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterList) DeepCopyInto(out *ClusterList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Cluster, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList.
+func (in *ClusterList) DeepCopy() *ClusterList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
+ *out = *in
+ in.CloudProfile.DeepCopyInto(&out.CloudProfile)
+ in.Seed.DeepCopyInto(&out.Seed)
+ in.Shoot.DeepCopyInto(&out.Shoot)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
+func (in *ClusterSpec) DeepCopy() *ClusterSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerRuntime) DeepCopyInto(out *ContainerRuntime) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRuntime.
+func (in *ContainerRuntime) DeepCopy() *ContainerRuntime {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerRuntime)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ContainerRuntime) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerRuntimeList) DeepCopyInto(out *ContainerRuntimeList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ContainerRuntime, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRuntimeList.
+func (in *ContainerRuntimeList) DeepCopy() *ContainerRuntimeList {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerRuntimeList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ContainerRuntimeList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerRuntimeSpec) DeepCopyInto(out *ContainerRuntimeSpec) {
+ *out = *in
+ in.WorkerPool.DeepCopyInto(&out.WorkerPool)
+ in.DefaultSpec.DeepCopyInto(&out.DefaultSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRuntimeSpec.
+func (in *ContainerRuntimeSpec) DeepCopy() *ContainerRuntimeSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerRuntimeSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerRuntimeStatus) DeepCopyInto(out *ContainerRuntimeStatus) {
+ *out = *in
+ in.DefaultStatus.DeepCopyInto(&out.DefaultStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRuntimeStatus.
+func (in *ContainerRuntimeStatus) DeepCopy() *ContainerRuntimeStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerRuntimeStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerRuntimeWorkerPool) DeepCopyInto(out *ContainerRuntimeWorkerPool) {
+ *out = *in
+ in.Selector.DeepCopyInto(&out.Selector)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRuntimeWorkerPool.
+func (in *ContainerRuntimeWorkerPool) DeepCopy() *ContainerRuntimeWorkerPool {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerRuntimeWorkerPool)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlane) DeepCopyInto(out *ControlPlane) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlane.
+func (in *ControlPlane) DeepCopy() *ControlPlane {
+ if in == nil {
+ return nil
+ }
+ out := new(ControlPlane)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControlPlane) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneList) DeepCopyInto(out *ControlPlaneList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ControlPlane, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneList.
+func (in *ControlPlaneList) DeepCopy() *ControlPlaneList {
+ if in == nil {
+ return nil
+ }
+ out := new(ControlPlaneList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControlPlaneList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneSpec) DeepCopyInto(out *ControlPlaneSpec) {
+ *out = *in
+ in.DefaultSpec.DeepCopyInto(&out.DefaultSpec)
+ if in.Purpose != nil {
+ in, out := &in.Purpose, &out.Purpose
+ *out = new(Purpose)
+ **out = **in
+ }
+ if in.InfrastructureProviderStatus != nil {
+ in, out := &in.InfrastructureProviderStatus, &out.InfrastructureProviderStatus
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ out.SecretRef = in.SecretRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneSpec.
+func (in *ControlPlaneSpec) DeepCopy() *ControlPlaneSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ControlPlaneSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneStatus) DeepCopyInto(out *ControlPlaneStatus) {
+ *out = *in
+ in.DefaultStatus.DeepCopyInto(&out.DefaultStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneStatus.
+func (in *ControlPlaneStatus) DeepCopy() *ControlPlaneStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ControlPlaneStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DataVolume) DeepCopyInto(out *DataVolume) {
+ *out = *in
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ if in.Encrypted != nil {
+ in, out := &in.Encrypted, &out.Encrypted
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolume.
+func (in *DataVolume) DeepCopy() *DataVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(DataVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DefaultSpec) DeepCopyInto(out *DefaultSpec) {
+ *out = *in
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultSpec.
+func (in *DefaultSpec) DeepCopy() *DefaultSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DefaultSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DefaultStatus) DeepCopyInto(out *DefaultStatus) {
+ *out = *in
+ if in.ProviderStatus != nil {
+ in, out := &in.ProviderStatus, &out.ProviderStatus
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1beta1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.LastError != nil {
+ in, out := &in.LastError, &out.LastError
+ *out = new(v1beta1.LastError)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastOperation != nil {
+ in, out := &in.LastOperation, &out.LastOperation
+ *out = new(v1beta1.LastOperation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.State != nil {
+ in, out := &in.State, &out.State
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]v1beta1.NamedResourceReference, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultStatus.
+func (in *DefaultStatus) DeepCopy() *DefaultStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DefaultStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DropIn) DeepCopyInto(out *DropIn) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DropIn.
+func (in *DropIn) DeepCopy() *DropIn {
+ if in == nil {
+ return nil
+ }
+ out := new(DropIn)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Extension) DeepCopyInto(out *Extension) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extension.
+func (in *Extension) DeepCopy() *Extension {
+ if in == nil {
+ return nil
+ }
+ out := new(Extension)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Extension) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtensionList) DeepCopyInto(out *ExtensionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Extension, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionList.
+func (in *ExtensionList) DeepCopy() *ExtensionList {
+ if in == nil {
+ return nil
+ }
+ out := new(ExtensionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExtensionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtensionSpec) DeepCopyInto(out *ExtensionSpec) {
+ *out = *in
+ in.DefaultSpec.DeepCopyInto(&out.DefaultSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionSpec.
+func (in *ExtensionSpec) DeepCopy() *ExtensionSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ExtensionSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtensionStatus) DeepCopyInto(out *ExtensionStatus) {
+ *out = *in
+ in.DefaultStatus.DeepCopyInto(&out.DefaultStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionStatus.
+func (in *ExtensionStatus) DeepCopy() *ExtensionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ExtensionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *File) DeepCopyInto(out *File) {
+ *out = *in
+ if in.Permissions != nil {
+ in, out := &in.Permissions, &out.Permissions
+ *out = new(int32)
+ **out = **in
+ }
+ in.Content.DeepCopyInto(&out.Content)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new File.
+func (in *File) DeepCopy() *File {
+ if in == nil {
+ return nil
+ }
+ out := new(File)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FileContent) DeepCopyInto(out *FileContent) {
+ *out = *in
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(FileContentSecretRef)
+ **out = **in
+ }
+ if in.Inline != nil {
+ in, out := &in.Inline, &out.Inline
+ *out = new(FileContentInline)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileContent.
+func (in *FileContent) DeepCopy() *FileContent {
+ if in == nil {
+ return nil
+ }
+ out := new(FileContent)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FileContentInline) DeepCopyInto(out *FileContentInline) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileContentInline.
+func (in *FileContentInline) DeepCopy() *FileContentInline {
+ if in == nil {
+ return nil
+ }
+ out := new(FileContentInline)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FileContentSecretRef) DeepCopyInto(out *FileContentSecretRef) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileContentSecretRef.
+func (in *FileContentSecretRef) DeepCopy() *FileContentSecretRef {
+ if in == nil {
+ return nil
+ }
+ out := new(FileContentSecretRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Infrastructure) DeepCopyInto(out *Infrastructure) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure.
+func (in *Infrastructure) DeepCopy() *Infrastructure {
+ if in == nil {
+ return nil
+ }
+ out := new(Infrastructure)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Infrastructure) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Infrastructure, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList.
+func (in *InfrastructureList) DeepCopy() *InfrastructureList {
+ if in == nil {
+ return nil
+ }
+ out := new(InfrastructureList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InfrastructureList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) {
+ *out = *in
+ in.DefaultSpec.DeepCopyInto(&out.DefaultSpec)
+ out.SecretRef = in.SecretRef
+ if in.SSHPublicKey != nil {
+ in, out := &in.SSHPublicKey, &out.SSHPublicKey
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec.
+func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(InfrastructureSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) {
+ *out = *in
+ in.DefaultStatus.DeepCopyInto(&out.DefaultStatus)
+ if in.NodesCIDR != nil {
+ in, out := &in.NodesCIDR, &out.NodesCIDR
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus.
+func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(InfrastructureStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineDeployment) DeepCopyInto(out *MachineDeployment) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeployment.
+func (in *MachineDeployment) DeepCopy() *MachineDeployment {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineDeployment)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineImage) DeepCopyInto(out *MachineImage) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImage.
+func (in *MachineImage) DeepCopy() *MachineImage {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineImage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Network) DeepCopyInto(out *Network) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network.
+func (in *Network) DeepCopy() *Network {
+ if in == nil {
+ return nil
+ }
+ out := new(Network)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Network) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkList) DeepCopyInto(out *NetworkList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Network, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList.
+func (in *NetworkList) DeepCopy() *NetworkList {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetworkList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
+ *out = *in
+ in.DefaultSpec.DeepCopyInto(&out.DefaultSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
+func (in *NetworkSpec) DeepCopy() *NetworkSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) {
+ *out = *in
+ in.DefaultStatus.DeepCopyInto(&out.DefaultStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus.
+func (in *NetworkStatus) DeepCopy() *NetworkStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatingSystemConfig) DeepCopyInto(out *OperatingSystemConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatingSystemConfig.
+func (in *OperatingSystemConfig) DeepCopy() *OperatingSystemConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatingSystemConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatingSystemConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatingSystemConfigList) DeepCopyInto(out *OperatingSystemConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OperatingSystemConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatingSystemConfigList.
+func (in *OperatingSystemConfigList) DeepCopy() *OperatingSystemConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatingSystemConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatingSystemConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatingSystemConfigSpec) DeepCopyInto(out *OperatingSystemConfigSpec) {
+ *out = *in
+ if in.CRIConfig != nil {
+ in, out := &in.CRIConfig, &out.CRIConfig
+ *out = new(CRIConfig)
+ **out = **in
+ }
+ in.DefaultSpec.DeepCopyInto(&out.DefaultSpec)
+ if in.ReloadConfigFilePath != nil {
+ in, out := &in.ReloadConfigFilePath, &out.ReloadConfigFilePath
+ *out = new(string)
+ **out = **in
+ }
+ if in.Units != nil {
+ in, out := &in.Units, &out.Units
+ *out = make([]Unit, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Files != nil {
+ in, out := &in.Files, &out.Files
+ *out = make([]File, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatingSystemConfigSpec.
+func (in *OperatingSystemConfigSpec) DeepCopy() *OperatingSystemConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatingSystemConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatingSystemConfigStatus) DeepCopyInto(out *OperatingSystemConfigStatus) {
+ *out = *in
+ in.DefaultStatus.DeepCopyInto(&out.DefaultStatus)
+ if in.CloudConfig != nil {
+ in, out := &in.CloudConfig, &out.CloudConfig
+ *out = new(CloudConfig)
+ **out = **in
+ }
+ if in.Command != nil {
+ in, out := &in.Command, &out.Command
+ *out = new(string)
+ **out = **in
+ }
+ if in.Units != nil {
+ in, out := &in.Units, &out.Units
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatingSystemConfigStatus.
+func (in *OperatingSystemConfigStatus) DeepCopy() *OperatingSystemConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatingSystemConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Unit) DeepCopyInto(out *Unit) {
+ *out = *in
+ if in.Command != nil {
+ in, out := &in.Command, &out.Command
+ *out = new(string)
+ **out = **in
+ }
+ if in.Enable != nil {
+ in, out := &in.Enable, &out.Enable
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Content != nil {
+ in, out := &in.Content, &out.Content
+ *out = new(string)
+ **out = **in
+ }
+ if in.DropIns != nil {
+ in, out := &in.DropIns, &out.DropIns
+ *out = make([]DropIn, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Unit.
+func (in *Unit) DeepCopy() *Unit {
+ if in == nil {
+ return nil
+ }
+ out := new(Unit)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Volume) DeepCopyInto(out *Volume) {
+ *out = *in
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ if in.Encrypted != nil {
+ in, out := &in.Encrypted, &out.Encrypted
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume.
+func (in *Volume) DeepCopy() *Volume {
+ if in == nil {
+ return nil
+ }
+ out := new(Volume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Worker) DeepCopyInto(out *Worker) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Worker.
+func (in *Worker) DeepCopy() *Worker {
+ if in == nil {
+ return nil
+ }
+ out := new(Worker)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Worker) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkerList) DeepCopyInto(out *WorkerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Worker, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerList.
+func (in *WorkerList) DeepCopy() *WorkerList {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *WorkerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkerPool) DeepCopyInto(out *WorkerPool) {
+ *out = *in
+ out.MaxSurge = in.MaxSurge
+ out.MaxUnavailable = in.MaxUnavailable
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Taints != nil {
+ in, out := &in.Taints, &out.Taints
+ *out = make([]v1.Taint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ out.MachineImage = in.MachineImage
+ if in.ProviderConfig != nil {
+ in, out := &in.ProviderConfig, &out.ProviderConfig
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.UserData != nil {
+ in, out := &in.UserData, &out.UserData
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ if in.Volume != nil {
+ in, out := &in.Volume, &out.Volume
+ *out = new(Volume)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DataVolumes != nil {
+ in, out := &in.DataVolumes, &out.DataVolumes
+ *out = make([]DataVolume, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.KubeletDataVolumeName != nil {
+ in, out := &in.KubeletDataVolumeName, &out.KubeletDataVolumeName
+ *out = new(string)
+ **out = **in
+ }
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.MachineControllerManagerSettings != nil {
+ in, out := &in.MachineControllerManagerSettings, &out.MachineControllerManagerSettings
+ *out = new(v1beta1.MachineControllerManagerSettings)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerPool.
+func (in *WorkerPool) DeepCopy() *WorkerPool {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkerPool)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkerSpec) DeepCopyInto(out *WorkerSpec) {
+ *out = *in
+ in.DefaultSpec.DeepCopyInto(&out.DefaultSpec)
+ if in.InfrastructureProviderStatus != nil {
+ in, out := &in.InfrastructureProviderStatus, &out.InfrastructureProviderStatus
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ out.SecretRef = in.SecretRef
+ if in.SSHPublicKey != nil {
+ in, out := &in.SSHPublicKey, &out.SSHPublicKey
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ if in.Pools != nil {
+ in, out := &in.Pools, &out.Pools
+ *out = make([]WorkerPool, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSpec.
+func (in *WorkerSpec) DeepCopy() *WorkerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkerStatus) DeepCopyInto(out *WorkerStatus) {
+ *out = *in
+ in.DefaultStatus.DeepCopyInto(&out.DefaultStatus)
+ if in.MachineDeployments != nil {
+ in, out := &in.MachineDeployments, &out.MachineDeployments
+ *out = make([]MachineDeployment, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerStatus.
+func (in *WorkerStatus) DeepCopy() *WorkerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
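The generated DeepCopy helpers above exist so that callers can safely mutate copies of objects that may be shared, for example objects served from an informer cache. A brief sketch under that assumption; the label key is hypothetical:

package example

import (
	extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
)

// copyAndLabel returns a deep copy of the given Worker with an extra label on
// its first pool, leaving the (possibly cached) original untouched.
func copyAndLabel(w *extensionsv1alpha1.Worker) *extensionsv1alpha1.Worker {
	out := w.DeepCopy()
	if len(out.Spec.Pools) > 0 {
		if out.Spec.Pools[0].Labels == nil {
			out.Spec.Pools[0].Labels = map[string]string{}
		}
		out.Spec.Pools[0].Labels["example.io/edited"] = "true" // hypothetical label key
	}
	return out
}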
diff --git a/vendor/github.com/gardener/gardener/pkg/chartrenderer/default.go b/vendor/github.com/gardener/gardener/pkg/chartrenderer/default.go
new file mode 100644
index 0000000..c549105
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/chartrenderer/default.go
@@ -0,0 +1,197 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chartrenderer
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "path"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/version"
+ "k8s.io/client-go/discovery"
+ "k8s.io/client-go/rest"
+
+ "k8s.io/helm/pkg/chartutil"
+ "k8s.io/helm/pkg/engine"
+ "k8s.io/helm/pkg/manifest"
+ chartapi "k8s.io/helm/pkg/proto/hapi/chart"
+ "k8s.io/helm/pkg/timeconv"
+)
+
+const notesFileSuffix = "NOTES.txt"
+
+// chartRenderer is a struct which contains the chart render engine and the server capabilities.
+// The chart renderer is used to render Helm charts into a RenderedChart struct from which the
+// resulting manifests can be generated.
+type chartRenderer struct {
+ renderer *engine.Engine
+ capabilities *chartutil.Capabilities
+}
+
+// NewForConfig creates a new ChartRenderer object for the given Kubernetes REST config. The config is
+// used to create a discovery client for determining the server version.
+func NewForConfig(cfg *rest.Config) (Interface, error) {
+ disc, err := discovery.NewDiscoveryClientForConfig(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ sv, err := disc.ServerVersion()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get kubernetes server version: %v", err)
+ }
+
+ return NewWithServerVersion(sv), nil
+}
+
+// NewWithServerVersion creates a new chart renderer with the given server version.
+func NewWithServerVersion(serverVersion *version.Info) Interface {
+ return &chartRenderer{
+ renderer: engine.New(),
+ capabilities: &chartutil.Capabilities{KubeVersion: serverVersion},
+ }
+}
+
+// DiscoverCapabilities discovers the capabilities required for chart renderers using the given
+// DiscoveryInterface.
+func DiscoverCapabilities(disc discovery.DiscoveryInterface) (*chartutil.Capabilities, error) {
+ sv, err := disc.ServerVersion()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get kubernetes server version: %v", err)
+ }
+
+ return &chartutil.Capabilities{KubeVersion: sv}, nil
+}
+
+// Render loads the chart from the given path and renders it with the given release name, namespace,
+// and values into a RenderedChart object.
+func (r *chartRenderer) Render(chartPath, releaseName, namespace string, values interface{}) (*RenderedChart, error) {
+ chart, err := chartutil.Load(chartPath)
+ if err != nil {
+ return nil, fmt.Errorf("can't load chart from path %s: %s", chartPath, err)
+ }
+ return r.renderRelease(chart, releaseName, namespace, values)
+}
+
+// RenderArchive loads the chart from the given tarball archive and renders it with the given release
+// name, namespace, and values into a RenderedChart object.
+func (r *chartRenderer) RenderArchive(archive []byte, releaseName, namespace string, values interface{}) (*RenderedChart, error) {
+ chart, err := chartutil.LoadArchive(bytes.NewReader(archive))
+ if err != nil {
+ return nil, fmt.Errorf("can't load chart from archive: %s", err)
+ }
+ return r.renderRelease(chart, releaseName, namespace, values)
+}
+
+func (r *chartRenderer) renderRelease(chart *chartapi.Chart, releaseName, namespace string, values interface{}) (*RenderedChart, error) {
+ chartName := chart.GetMetadata().GetName()
+
+ parsedValues, err := json.Marshal(values)
+ if err != nil {
+ return nil, fmt.Errorf("can't parse variables for chart %s: %s", chartName, err)
+ }
+ chartConfig := &chartapi.Config{Raw: string(parsedValues)}
+
+ err = chartutil.ProcessRequirementsEnabled(chart, chartConfig)
+ if err != nil {
+ return nil, fmt.Errorf("can't process requirements for chart %s: %s", chartName, err)
+ }
+ err = chartutil.ProcessRequirementsImportValues(chart)
+ if err != nil {
+ return nil, fmt.Errorf("can't process requirements for import values for chart %s: %s", chartName, err)
+ }
+
+ caps := r.capabilities
+ revision := 1
+ ts := timeconv.Now()
+ options := chartutil.ReleaseOptions{
+ Name: releaseName,
+ Time: ts,
+ Namespace: namespace,
+ Revision: revision,
+ IsInstall: true,
+ }
+
+ valuesToRender, err := chartutil.ToRenderValuesCaps(chart, chartConfig, options, caps)
+ if err != nil {
+ return nil, err
+ }
+ return r.renderResources(chart, valuesToRender)
+}
+
+func (r *chartRenderer) renderResources(ch *chartapi.Chart, values chartutil.Values) (*RenderedChart, error) {
+ files, err := r.renderer.Render(ch, values)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remove NOTES.txt and partials
+ for k := range files {
+ if strings.HasSuffix(k, notesFileSuffix) || strings.HasPrefix(path.Base(k), "_") {
+ delete(files, k)
+ }
+ }
+
+ manifests := manifest.SplitManifests(files)
+ manifests = SortByKind(manifests)
+
+ return &RenderedChart{
+ ChartName: ch.Metadata.Name,
+ Manifests: manifests,
+ }, nil
+}
+
+// Manifest returns all manifests of the rendered chart aggregated into a single byte slice.
+func (c *RenderedChart) Manifest() []byte {
+ // Aggregate all valid manifests into one big doc.
+ b := bytes.NewBuffer(nil)
+
+ for _, mf := range c.Manifests {
+ b.WriteString("\n---\n# Source: " + mf.Name + "\n")
+ b.WriteString(mf.Content)
+ }
+ return b.Bytes()
+}
+
+// Files returns all rendered manifests mapping their names to their content.
+func (c *RenderedChart) Files() map[string]string {
+ var files = make(map[string]string)
+ for _, manifest := range c.Manifests {
+ files[manifest.Name] = manifest.Content
+ }
+ return files
+}
+
+// FileContent returns the content of the rendered template file with the provided name, or an empty
+// string if no such file exists in the chart.
+func (c *RenderedChart) FileContent(filename string) string {
+ for _, mf := range c.Manifests {
+ if mf.Name == fmt.Sprintf("%s/templates/%s", c.ChartName, filename) {
+ return mf.Content
+ }
+ }
+ return ""
+}
+
+// AsSecretData returns all rendered manifests in a form that can be used as the data of a Kubernetes Secret.
+func (c *RenderedChart) AsSecretData() map[string][]byte {
+ data := make(map[string][]byte, len(c.Files()))
+ for fileName, fileContent := range c.Files() {
+ key := strings.ReplaceAll(fileName, "/", "_")
+ data[key] = []byte(fileContent)
+ }
+ return data
+}
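A short usage sketch of the renderer defined above: build a renderer from a REST config, render a chart directory, and consume the aggregated manifest. The chart path, release name, namespace, and values map are placeholders:

package example

import (
	"fmt"

	"github.com/gardener/gardener/pkg/chartrenderer"
	"k8s.io/client-go/rest"
)

// renderChart renders the chart at chartPath with placeholder values and
// returns the aggregated manifest document.
func renderChart(cfg *rest.Config, chartPath string) ([]byte, error) {
	renderer, err := chartrenderer.NewForConfig(cfg)
	if err != nil {
		return nil, fmt.Errorf("creating chart renderer: %w", err)
	}

	// Values are chart-specific; this map is only an example.
	values := map[string]interface{}{"replicaCount": 1}

	release, err := renderer.Render(chartPath, "my-release", "my-namespace", values)
	if err != nil {
		return nil, err
	}
	return release.Manifest(), nil
}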
diff --git a/vendor/github.com/gardener/gardener/pkg/chartrenderer/factory.go b/vendor/github.com/gardener/gardener/pkg/chartrenderer/factory.go
new file mode 100644
index 0000000..e93b44f
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/chartrenderer/factory.go
@@ -0,0 +1,37 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chartrenderer
+
+import (
+ "k8s.io/client-go/rest"
+)
+
+// Factory is a factory that is able to produce Interface.
+type Factory interface {
+ NewForConfig(config *rest.Config) (Interface, error)
+}
+
+// FactoryFunc implements the Factory interface.
+type FactoryFunc func(config *rest.Config) (Interface, error)
+
+// NewForConfig implements Factory.
+func (f FactoryFunc) NewForConfig(config *rest.Config) (Interface, error) {
+ return f(config)
+}
+
+// DefaultFactory returns the default Factory.
+func DefaultFactory() Factory {
+ return FactoryFunc(NewForConfig)
+}
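The FactoryFunc adapter above allows swapping out how renderers are constructed, for instance in tests. A sketch that pins the renderer to a fixed (placeholder) server version instead of discovering it from a live cluster:

package example

import (
	"github.com/gardener/gardener/pkg/chartrenderer"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/client-go/rest"
)

// fixedVersionFactory returns a Factory that ignores the REST config and
// always renders against a fixed (placeholder) server version.
func fixedVersionFactory() chartrenderer.Factory {
	return chartrenderer.FactoryFunc(func(_ *rest.Config) (chartrenderer.Interface, error) {
		return chartrenderer.NewWithServerVersion(&version.Info{Major: "1", Minor: "20"}), nil
	})
}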
diff --git a/vendor/github.com/gardener/gardener/pkg/chartrenderer/renderer.go b/vendor/github.com/gardener/gardener/pkg/chartrenderer/renderer.go
new file mode 100644
index 0000000..dac90c2
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/chartrenderer/renderer.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chartrenderer
+
+import (
+ "k8s.io/helm/pkg/manifest"
+)
+
+// Interface is an interface for rendering Helm Charts from path, name, namespace and values.
+type Interface interface {
+ Render(chartPath, releaseName, namespace string, values interface{}) (*RenderedChart, error)
+ RenderArchive(archive []byte, releaseName, namespace string, values interface{}) (*RenderedChart, error)
+}
+
+// RenderedChart holds the name of the rendered chart and the list of rendered manifests, each with its
+// template file name and rendered content.
+type RenderedChart struct {
+ ChartName string
+ Manifests []manifest.Manifest
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/chartrenderer/sorter.go b/vendor/github.com/gardener/gardener/pkg/chartrenderer/sorter.go
new file mode 100644
index 0000000..cded184
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/chartrenderer/sorter.go
@@ -0,0 +1,121 @@
+/*
+ Copyright The Helm Authors.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// @rfranzke, @zanetworker:
+// Origin of this file: https://github.com/helm/helm/blob/bed4054c412f95d140c8c98b6387f40df7f3139e/pkg/tiller/kind_sorter.go
+// We copied it into this repository so that we do not have to depend on k8s.io/helm/pkg/tiller.
+// That package depends on k8s.io/helm/pkg/kube, which transitively depends on k8s.io/kubernetes.
+// To prevent vendor hell we do not want to depend on k8s.io/kubernetes.
+
+package chartrenderer
+
+import (
+ "sort"
+
+ "k8s.io/helm/pkg/manifest"
+)
+
+// SortOrder is an ordering of Kinds.
+type SortOrder []string
+
+// InstallOrder is the order in which manifests should be installed (by Kind).
+//
+// Those occurring earlier in the list get installed before those occurring later in the list.
+var InstallOrder SortOrder = []string{
+ "Namespace",
+ "ResourceQuota",
+ "LimitRange",
+ "PodSecurityPolicy",
+ "PodDisruptionBudget",
+ "Secret",
+ "ConfigMap",
+ "StorageClass",
+ "PersistentVolume",
+ "PersistentVolumeClaim",
+ "ServiceAccount",
+ "CustomResourceDefinition",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "Role",
+ "RoleBinding",
+ "Service",
+ "DaemonSet",
+ "Pod",
+ "ReplicationController",
+ "ReplicaSet",
+ "Deployment",
+ "StatefulSet",
+ "Job",
+ "CronJob",
+ "Ingress",
+ "APIService",
+}
+
+type kindSorter struct {
+ ordering map[string]int
+ manifests []manifest.Manifest
+}
+
+func newKindSorter(m []manifest.Manifest, s SortOrder) *kindSorter {
+ o := make(map[string]int, len(s))
+ for v, k := range s {
+ o[k] = v
+ }
+
+ return &kindSorter{
+ manifests: m,
+ ordering: o,
+ }
+}
+
+func (k *kindSorter) Len() int { return len(k.manifests) }
+
+func (k *kindSorter) Swap(i, j int) { k.manifests[i], k.manifests[j] = k.manifests[j], k.manifests[i] }
+
+func (k *kindSorter) Less(i, j int) bool {
+ a := k.manifests[i]
+ b := k.manifests[j]
+ first, aok := k.ordering[a.Head.Kind]
+ second, bok := k.ordering[b.Head.Kind]
+
+ if !aok && !bok {
+ // if both are unknown then sort alphabetically by kind and name
+ if a.Head.Kind != b.Head.Kind {
+ return a.Head.Kind < b.Head.Kind
+ }
+ return a.Name < b.Name
+ }
+
+ // unknown kind is last
+ if !aok {
+ return false
+ }
+ if !bok {
+ return true
+ }
+
+ // if same kind sub sort alphanumeric
+ if first == second {
+ return a.Name < b.Name
+ }
+ // sort different kinds
+ return first < second
+}
+
+// SortByKind sorts manifests in InstallOrder
+func SortByKind(manifests []manifest.Manifest) []manifest.Manifest {
+ ordering := InstallOrder
+ ks := newKindSorter(manifests, ordering)
+ sort.Sort(ks)
+ return ks.manifests
+}
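
SortByKind mirrors Helm's tiller kind sorter: kinds listed in InstallOrder are applied in that order, unknown kinds go last, and ties fall back to alphabetical sorting by kind and name. A small sketch of sorting rendered manifests before applying them; the apply step is deliberately left abstract and the function name is illustrative.

    package chartrendererexample // illustrative package name

    import (
        "github.com/gardener/gardener/pkg/chartrenderer"
        "k8s.io/helm/pkg/manifest"
    )

    // ApplyInOrder sorts manifests so that, for example, Namespaces, CRDs and RBAC
    // objects come before the workloads that need them, then hands each manifest
    // to a caller-supplied apply function. Illustrative only.
    func ApplyInOrder(manifests []manifest.Manifest, apply func(manifest.Manifest) error) error {
        for _, m := range chartrenderer.SortByKind(manifests) {
            if err := apply(m); err != nil {
                return err
            }
        }
        return nil
    }
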
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/clientset.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/clientset.go
new file mode 100644
index 0000000..d6ec686
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/clientset.go
@@ -0,0 +1,111 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package versioned
+
+import (
+ "fmt"
+
+ corev1alpha1 "github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1"
+ corev1beta1 "github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1"
+ discovery "k8s.io/client-go/discovery"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
+)
+
+type Interface interface {
+ Discovery() discovery.DiscoveryInterface
+ CoreV1alpha1() corev1alpha1.CoreV1alpha1Interface
+ CoreV1beta1() corev1beta1.CoreV1beta1Interface
+}
+
+// Clientset contains the clients for groups. Each group has exactly one
+// version included in a Clientset.
+type Clientset struct {
+ *discovery.DiscoveryClient
+ coreV1alpha1 *corev1alpha1.CoreV1alpha1Client
+ coreV1beta1 *corev1beta1.CoreV1beta1Client
+}
+
+// CoreV1alpha1 retrieves the CoreV1alpha1Client
+func (c *Clientset) CoreV1alpha1() corev1alpha1.CoreV1alpha1Interface {
+ return c.coreV1alpha1
+}
+
+// CoreV1beta1 retrieves the CoreV1beta1Client
+func (c *Clientset) CoreV1beta1() corev1beta1.CoreV1beta1Interface {
+ return c.coreV1beta1
+}
+
+// Discovery retrieves the DiscoveryClient
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ if c == nil {
+ return nil
+ }
+ return c.DiscoveryClient
+}
+
+// NewForConfig creates a new Clientset for the given config.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfig will generate a rate-limiter in configShallowCopy.
+func NewForConfig(c *rest.Config) (*Clientset, error) {
+ configShallowCopy := *c
+ if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+ if configShallowCopy.Burst <= 0 {
+ return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
+ }
+ configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
+ }
+ var cs Clientset
+ var err error
+ cs.coreV1alpha1, err = corev1alpha1.NewForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+ cs.coreV1beta1, err = corev1beta1.NewForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+
+ cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+ return &cs, nil
+}
+
+// NewForConfigOrDie creates a new Clientset for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *Clientset {
+ var cs Clientset
+ cs.coreV1alpha1 = corev1alpha1.NewForConfigOrDie(c)
+ cs.coreV1beta1 = corev1beta1.NewForConfigOrDie(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
+ return &cs
+}
+
+// New creates a new Clientset for the given RESTClient.
+func New(c rest.Interface) *Clientset {
+ var cs Clientset
+ cs.coreV1alpha1 = corev1alpha1.New(c)
+ cs.coreV1beta1 = corev1beta1.New(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
+ return &cs
+}
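
NewForConfig above wires the per-group clients (core/v1alpha1 and core/v1beta1) plus a discovery client from a single shallow-copied rest.Config, sharing an optional rate limiter derived from QPS and Burst. A hedged sketch of constructing the clientset from a kubeconfig path; the path is a placeholder and an in-cluster config would work just as well.

    package main

    import (
        "fmt"

        gardencore "github.com/gardener/gardener/pkg/client/core/clientset/versioned"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Placeholder kubeconfig path.
        cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
        if err != nil {
            panic(err)
        }
        cs, err := gardencore.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // The discovery client is embedded, so server information is directly available.
        info, err := cs.Discovery().ServerVersion()
        if err != nil {
            panic(err)
        }
        fmt.Println("garden cluster API server version:", info.GitVersion)
    }
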
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/doc.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/doc.go
new file mode 100644
index 0000000..32d8522
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated clientset.
+package versioned
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/doc.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/doc.go
new file mode 100644
index 0000000..7d4fb77
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/register.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/register.go
new file mode 100644
index 0000000..aee592d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/register.go
@@ -0,0 +1,58 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ corev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ corev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ corev1alpha1.AddToScheme,
+ corev1beta1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupbucket.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupbucket.go
new file mode 100644
index 0000000..8e168d5
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupbucket.go
@@ -0,0 +1,184 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// BackupBucketsGetter has a method to return a BackupBucketInterface.
+// A group's client should implement this interface.
+type BackupBucketsGetter interface {
+ BackupBuckets() BackupBucketInterface
+}
+
+// BackupBucketInterface has methods to work with BackupBucket resources.
+type BackupBucketInterface interface {
+ Create(ctx context.Context, backupBucket *v1alpha1.BackupBucket, opts v1.CreateOptions) (*v1alpha1.BackupBucket, error)
+ Update(ctx context.Context, backupBucket *v1alpha1.BackupBucket, opts v1.UpdateOptions) (*v1alpha1.BackupBucket, error)
+ UpdateStatus(ctx context.Context, backupBucket *v1alpha1.BackupBucket, opts v1.UpdateOptions) (*v1alpha1.BackupBucket, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.BackupBucket, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.BackupBucketList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupBucket, err error)
+ BackupBucketExpansion
+}
+
+// backupBuckets implements BackupBucketInterface
+type backupBuckets struct {
+ client rest.Interface
+}
+
+// newBackupBuckets returns a BackupBuckets
+func newBackupBuckets(c *CoreV1alpha1Client) *backupBuckets {
+ return &backupBuckets{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the backupBucket, and returns the corresponding backupBucket object, and an error if there is any.
+func (c *backupBuckets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BackupBucket, err error) {
+ result = &v1alpha1.BackupBucket{}
+ err = c.client.Get().
+ Resource("backupbuckets").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of BackupBuckets that match those selectors.
+func (c *backupBuckets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BackupBucketList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.BackupBucketList{}
+ err = c.client.Get().
+ Resource("backupbuckets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested backupBuckets.
+func (c *backupBuckets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("backupbuckets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a backupBucket and creates it. Returns the server's representation of the backupBucket, and an error, if there is any.
+func (c *backupBuckets) Create(ctx context.Context, backupBucket *v1alpha1.BackupBucket, opts v1.CreateOptions) (result *v1alpha1.BackupBucket, err error) {
+ result = &v1alpha1.BackupBucket{}
+ err = c.client.Post().
+ Resource("backupbuckets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backupBucket).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a backupBucket and updates it. Returns the server's representation of the backupBucket, and an error, if there is any.
+func (c *backupBuckets) Update(ctx context.Context, backupBucket *v1alpha1.BackupBucket, opts v1.UpdateOptions) (result *v1alpha1.BackupBucket, err error) {
+ result = &v1alpha1.BackupBucket{}
+ err = c.client.Put().
+ Resource("backupbuckets").
+ Name(backupBucket.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backupBucket).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *backupBuckets) UpdateStatus(ctx context.Context, backupBucket *v1alpha1.BackupBucket, opts v1.UpdateOptions) (result *v1alpha1.BackupBucket, err error) {
+ result = &v1alpha1.BackupBucket{}
+ err = c.client.Put().
+ Resource("backupbuckets").
+ Name(backupBucket.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backupBucket).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the backupBucket and deletes it. Returns an error if one occurs.
+func (c *backupBuckets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("backupbuckets").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *backupBuckets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("backupbuckets").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched backupBucket.
+func (c *backupBuckets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupBucket, err error) {
+ result = &v1alpha1.BackupBucket{}
+ err = c.client.Patch(pt).
+ Resource("backupbuckets").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
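
The generated typed client above follows the standard client-gen shape: a thin wrapper around rest.Interface that builds requests against the cluster-scoped backupbuckets resource. The remaining typed clients in this package (BackupEntries, CloudProfiles, ControllerInstallations, and so on) repeat the same pattern, differing only in resource name and namespacing. A sketch of listing BackupBuckets through the CoreV1alpha1 client obtained from the clientset in the previous sketch; the helper name is illustrative.

    package gardenclientexample // illustrative package name

    import (
        "context"
        "fmt"

        gardencore "github.com/gardener/gardener/pkg/client/core/clientset/versioned"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // listBackupBuckets prints the names of all BackupBuckets in the garden cluster.
    // cs is assumed to come from gardencore.NewForConfig as shown earlier.
    func listBackupBuckets(ctx context.Context, cs *gardencore.Clientset) error {
        buckets, err := cs.CoreV1alpha1().BackupBuckets().List(ctx, metav1.ListOptions{})
        if err != nil {
            return err
        }
        for _, b := range buckets.Items {
            fmt.Println(b.Name)
        }
        return nil
    }
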
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupentry.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupentry.go
new file mode 100644
index 0000000..cf36714
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupentry.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// BackupEntriesGetter has a method to return a BackupEntryInterface.
+// A group's client should implement this interface.
+type BackupEntriesGetter interface {
+ BackupEntries(namespace string) BackupEntryInterface
+}
+
+// BackupEntryInterface has methods to work with BackupEntry resources.
+type BackupEntryInterface interface {
+ Create(ctx context.Context, backupEntry *v1alpha1.BackupEntry, opts v1.CreateOptions) (*v1alpha1.BackupEntry, error)
+ Update(ctx context.Context, backupEntry *v1alpha1.BackupEntry, opts v1.UpdateOptions) (*v1alpha1.BackupEntry, error)
+ UpdateStatus(ctx context.Context, backupEntry *v1alpha1.BackupEntry, opts v1.UpdateOptions) (*v1alpha1.BackupEntry, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.BackupEntry, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.BackupEntryList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupEntry, err error)
+ BackupEntryExpansion
+}
+
+// backupEntries implements BackupEntryInterface
+type backupEntries struct {
+ client rest.Interface
+ ns string
+}
+
+// newBackupEntries returns a BackupEntries
+func newBackupEntries(c *CoreV1alpha1Client, namespace string) *backupEntries {
+ return &backupEntries{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the backupEntry, and returns the corresponding backupEntry object, and an error if there is any.
+func (c *backupEntries) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BackupEntry, err error) {
+ result = &v1alpha1.BackupEntry{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("backupentries").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of BackupEntries that match those selectors.
+func (c *backupEntries) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BackupEntryList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.BackupEntryList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("backupentries").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested backupEntries.
+func (c *backupEntries) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("backupentries").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a backupEntry and creates it. Returns the server's representation of the backupEntry, and an error, if there is any.
+func (c *backupEntries) Create(ctx context.Context, backupEntry *v1alpha1.BackupEntry, opts v1.CreateOptions) (result *v1alpha1.BackupEntry, err error) {
+ result = &v1alpha1.BackupEntry{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("backupentries").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backupEntry).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a backupEntry and updates it. Returns the server's representation of the backupEntry, and an error, if there is any.
+func (c *backupEntries) Update(ctx context.Context, backupEntry *v1alpha1.BackupEntry, opts v1.UpdateOptions) (result *v1alpha1.BackupEntry, err error) {
+ result = &v1alpha1.BackupEntry{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("backupentries").
+ Name(backupEntry.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backupEntry).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *backupEntries) UpdateStatus(ctx context.Context, backupEntry *v1alpha1.BackupEntry, opts v1.UpdateOptions) (result *v1alpha1.BackupEntry, err error) {
+ result = &v1alpha1.BackupEntry{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("backupentries").
+ Name(backupEntry.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backupEntry).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the backupEntry and deletes it. Returns an error if one occurs.
+func (c *backupEntries) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("backupentries").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *backupEntries) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("backupentries").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched backupEntry.
+func (c *backupEntries) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupEntry, err error) {
+ result = &v1alpha1.BackupEntry{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("backupentries").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/cloudprofile.go
new file mode 100644
index 0000000..064d004
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/cloudprofile.go
@@ -0,0 +1,168 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CloudProfilesGetter has a method to return a CloudProfileInterface.
+// A group's client should implement this interface.
+type CloudProfilesGetter interface {
+ CloudProfiles() CloudProfileInterface
+}
+
+// CloudProfileInterface has methods to work with CloudProfile resources.
+type CloudProfileInterface interface {
+ Create(ctx context.Context, cloudProfile *v1alpha1.CloudProfile, opts v1.CreateOptions) (*v1alpha1.CloudProfile, error)
+ Update(ctx context.Context, cloudProfile *v1alpha1.CloudProfile, opts v1.UpdateOptions) (*v1alpha1.CloudProfile, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CloudProfile, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CloudProfileList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloudProfile, err error)
+ CloudProfileExpansion
+}
+
+// cloudProfiles implements CloudProfileInterface
+type cloudProfiles struct {
+ client rest.Interface
+}
+
+// newCloudProfiles returns a CloudProfiles
+func newCloudProfiles(c *CoreV1alpha1Client) *cloudProfiles {
+ return &cloudProfiles{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the cloudProfile, and returns the corresponding cloudProfile object, and an error if there is any.
+func (c *cloudProfiles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CloudProfile, err error) {
+ result = &v1alpha1.CloudProfile{}
+ err = c.client.Get().
+ Resource("cloudprofiles").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CloudProfiles that match those selectors.
+func (c *cloudProfiles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CloudProfileList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.CloudProfileList{}
+ err = c.client.Get().
+ Resource("cloudprofiles").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested cloudProfiles.
+func (c *cloudProfiles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("cloudprofiles").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a cloudProfile and creates it. Returns the server's representation of the cloudProfile, and an error, if there is any.
+func (c *cloudProfiles) Create(ctx context.Context, cloudProfile *v1alpha1.CloudProfile, opts v1.CreateOptions) (result *v1alpha1.CloudProfile, err error) {
+ result = &v1alpha1.CloudProfile{}
+ err = c.client.Post().
+ Resource("cloudprofiles").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(cloudProfile).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a cloudProfile and updates it. Returns the server's representation of the cloudProfile, and an error, if there is any.
+func (c *cloudProfiles) Update(ctx context.Context, cloudProfile *v1alpha1.CloudProfile, opts v1.UpdateOptions) (result *v1alpha1.CloudProfile, err error) {
+ result = &v1alpha1.CloudProfile{}
+ err = c.client.Put().
+ Resource("cloudprofiles").
+ Name(cloudProfile.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(cloudProfile).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the cloudProfile and deletes it. Returns an error if one occurs.
+func (c *cloudProfiles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("cloudprofiles").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *cloudProfiles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("cloudprofiles").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched cloudProfile.
+func (c *cloudProfiles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloudProfile, err error) {
+ result = &v1alpha1.CloudProfile{}
+ err = c.client.Patch(pt).
+ Resource("cloudprofiles").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerinstallation.go
new file mode 100644
index 0000000..219170e
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerinstallation.go
@@ -0,0 +1,184 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ControllerInstallationsGetter has a method to return a ControllerInstallationInterface.
+// A group's client should implement this interface.
+type ControllerInstallationsGetter interface {
+ ControllerInstallations() ControllerInstallationInterface
+}
+
+// ControllerInstallationInterface has methods to work with ControllerInstallation resources.
+type ControllerInstallationInterface interface {
+ Create(ctx context.Context, controllerInstallation *v1alpha1.ControllerInstallation, opts v1.CreateOptions) (*v1alpha1.ControllerInstallation, error)
+ Update(ctx context.Context, controllerInstallation *v1alpha1.ControllerInstallation, opts v1.UpdateOptions) (*v1alpha1.ControllerInstallation, error)
+ UpdateStatus(ctx context.Context, controllerInstallation *v1alpha1.ControllerInstallation, opts v1.UpdateOptions) (*v1alpha1.ControllerInstallation, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ControllerInstallation, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ControllerInstallationList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ControllerInstallation, err error)
+ ControllerInstallationExpansion
+}
+
+// controllerInstallations implements ControllerInstallationInterface
+type controllerInstallations struct {
+ client rest.Interface
+}
+
+// newControllerInstallations returns a ControllerInstallations
+func newControllerInstallations(c *CoreV1alpha1Client) *controllerInstallations {
+ return &controllerInstallations{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the controllerInstallation, and returns the corresponding controllerInstallation object, and an error if there is any.
+func (c *controllerInstallations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ControllerInstallation, err error) {
+ result = &v1alpha1.ControllerInstallation{}
+ err = c.client.Get().
+ Resource("controllerinstallations").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ControllerInstallations that match those selectors.
+func (c *controllerInstallations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ControllerInstallationList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ControllerInstallationList{}
+ err = c.client.Get().
+ Resource("controllerinstallations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested controllerInstallations.
+func (c *controllerInstallations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("controllerinstallations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a controllerInstallation and creates it. Returns the server's representation of the controllerInstallation, and an error, if there is any.
+func (c *controllerInstallations) Create(ctx context.Context, controllerInstallation *v1alpha1.ControllerInstallation, opts v1.CreateOptions) (result *v1alpha1.ControllerInstallation, err error) {
+ result = &v1alpha1.ControllerInstallation{}
+ err = c.client.Post().
+ Resource("controllerinstallations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(controllerInstallation).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a controllerInstallation and updates it. Returns the server's representation of the controllerInstallation, and an error, if there is any.
+func (c *controllerInstallations) Update(ctx context.Context, controllerInstallation *v1alpha1.ControllerInstallation, opts v1.UpdateOptions) (result *v1alpha1.ControllerInstallation, err error) {
+ result = &v1alpha1.ControllerInstallation{}
+ err = c.client.Put().
+ Resource("controllerinstallations").
+ Name(controllerInstallation.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(controllerInstallation).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *controllerInstallations) UpdateStatus(ctx context.Context, controllerInstallation *v1alpha1.ControllerInstallation, opts v1.UpdateOptions) (result *v1alpha1.ControllerInstallation, err error) {
+ result = &v1alpha1.ControllerInstallation{}
+ err = c.client.Put().
+ Resource("controllerinstallations").
+ Name(controllerInstallation.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(controllerInstallation).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the controllerInstallation and deletes it. Returns an error if one occurs.
+func (c *controllerInstallations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("controllerinstallations").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *controllerInstallations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("controllerinstallations").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched controllerInstallation.
+func (c *controllerInstallations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ControllerInstallation, err error) {
+ result = &v1alpha1.ControllerInstallation{}
+ err = c.client.Patch(pt).
+ Resource("controllerinstallations").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerregistration.go
new file mode 100644
index 0000000..67890f5
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerregistration.go
@@ -0,0 +1,168 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ControllerRegistrationsGetter has a method to return a ControllerRegistrationInterface.
+// A group's client should implement this interface.
+type ControllerRegistrationsGetter interface {
+ ControllerRegistrations() ControllerRegistrationInterface
+}
+
+// ControllerRegistrationInterface has methods to work with ControllerRegistration resources.
+type ControllerRegistrationInterface interface {
+ Create(ctx context.Context, controllerRegistration *v1alpha1.ControllerRegistration, opts v1.CreateOptions) (*v1alpha1.ControllerRegistration, error)
+ Update(ctx context.Context, controllerRegistration *v1alpha1.ControllerRegistration, opts v1.UpdateOptions) (*v1alpha1.ControllerRegistration, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ControllerRegistration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ControllerRegistrationList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ControllerRegistration, err error)
+ ControllerRegistrationExpansion
+}
+
+// controllerRegistrations implements ControllerRegistrationInterface
+type controllerRegistrations struct {
+ client rest.Interface
+}
+
+// newControllerRegistrations returns a ControllerRegistrations
+func newControllerRegistrations(c *CoreV1alpha1Client) *controllerRegistrations {
+ return &controllerRegistrations{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the controllerRegistration, and returns the corresponding controllerRegistration object, and an error if there is any.
+func (c *controllerRegistrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ControllerRegistration, err error) {
+ result = &v1alpha1.ControllerRegistration{}
+ err = c.client.Get().
+ Resource("controllerregistrations").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ControllerRegistrations that match those selectors.
+func (c *controllerRegistrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ControllerRegistrationList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ControllerRegistrationList{}
+ err = c.client.Get().
+ Resource("controllerregistrations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested controllerRegistrations.
+func (c *controllerRegistrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("controllerregistrations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a controllerRegistration and creates it. Returns the server's representation of the controllerRegistration, and an error, if there is any.
+func (c *controllerRegistrations) Create(ctx context.Context, controllerRegistration *v1alpha1.ControllerRegistration, opts v1.CreateOptions) (result *v1alpha1.ControllerRegistration, err error) {
+ result = &v1alpha1.ControllerRegistration{}
+ err = c.client.Post().
+ Resource("controllerregistrations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(controllerRegistration).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a controllerRegistration and updates it. Returns the server's representation of the controllerRegistration, and an error, if there is any.
+func (c *controllerRegistrations) Update(ctx context.Context, controllerRegistration *v1alpha1.ControllerRegistration, opts v1.UpdateOptions) (result *v1alpha1.ControllerRegistration, err error) {
+ result = &v1alpha1.ControllerRegistration{}
+ err = c.client.Put().
+ Resource("controllerregistrations").
+ Name(controllerRegistration.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(controllerRegistration).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the controllerRegistration and deletes it. Returns an error if one occurs.
+func (c *controllerRegistrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("controllerregistrations").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *controllerRegistrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("controllerregistrations").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched controllerRegistration.
+func (c *controllerRegistrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ControllerRegistration, err error) {
+ result = &v1alpha1.ControllerRegistration{}
+ err = c.client.Patch(pt).
+ Resource("controllerregistrations").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/core_client.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/core_client.go
new file mode 100644
index 0000000..3d60725
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/core_client.go
@@ -0,0 +1,144 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type CoreV1alpha1Interface interface {
+ RESTClient() rest.Interface
+ BackupBucketsGetter
+ BackupEntriesGetter
+ CloudProfilesGetter
+ ControllerInstallationsGetter
+ ControllerRegistrationsGetter
+ PlantsGetter
+ ProjectsGetter
+ QuotasGetter
+ SecretBindingsGetter
+ SeedsGetter
+ ShootsGetter
+ ShootStatesGetter
+}
+
+// CoreV1alpha1Client is used to interact with features provided by the core.gardener.cloud group.
+type CoreV1alpha1Client struct {
+ restClient rest.Interface
+}
+
+func (c *CoreV1alpha1Client) BackupBuckets() BackupBucketInterface {
+ return newBackupBuckets(c)
+}
+
+func (c *CoreV1alpha1Client) BackupEntries(namespace string) BackupEntryInterface {
+ return newBackupEntries(c, namespace)
+}
+
+func (c *CoreV1alpha1Client) CloudProfiles() CloudProfileInterface {
+ return newCloudProfiles(c)
+}
+
+func (c *CoreV1alpha1Client) ControllerInstallations() ControllerInstallationInterface {
+ return newControllerInstallations(c)
+}
+
+func (c *CoreV1alpha1Client) ControllerRegistrations() ControllerRegistrationInterface {
+ return newControllerRegistrations(c)
+}
+
+func (c *CoreV1alpha1Client) Plants(namespace string) PlantInterface {
+ return newPlants(c, namespace)
+}
+
+func (c *CoreV1alpha1Client) Projects() ProjectInterface {
+ return newProjects(c)
+}
+
+func (c *CoreV1alpha1Client) Quotas(namespace string) QuotaInterface {
+ return newQuotas(c, namespace)
+}
+
+func (c *CoreV1alpha1Client) SecretBindings(namespace string) SecretBindingInterface {
+ return newSecretBindings(c, namespace)
+}
+
+func (c *CoreV1alpha1Client) Seeds() SeedInterface {
+ return newSeeds(c)
+}
+
+func (c *CoreV1alpha1Client) Shoots(namespace string) ShootInterface {
+ return newShoots(c, namespace)
+}
+
+func (c *CoreV1alpha1Client) ShootStates(namespace string) ShootStateInterface {
+ return newShootStates(c, namespace)
+}
+
+// NewForConfig creates a new CoreV1alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*CoreV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &CoreV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new CoreV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *CoreV1alpha1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new CoreV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *CoreV1alpha1Client {
+ return &CoreV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1alpha1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *CoreV1alpha1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/doc.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/doc.go
new file mode 100644
index 0000000..828c8eb
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/generated_expansion.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/generated_expansion.go
new file mode 100644
index 0000000..89d9627
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/generated_expansion.go
@@ -0,0 +1,43 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type BackupBucketExpansion interface{}
+
+type BackupEntryExpansion interface{}
+
+type CloudProfileExpansion interface{}
+
+type ControllerInstallationExpansion interface{}
+
+type ControllerRegistrationExpansion interface{}
+
+type PlantExpansion interface{}
+
+type ProjectExpansion interface{}
+
+type QuotaExpansion interface{}
+
+type SecretBindingExpansion interface{}
+
+type SeedExpansion interface{}
+
+type ShootExpansion interface{}
+
+type ShootStateExpansion interface{}
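The expansion interfaces above are intentionally empty: client-gen embeds them into the generated resource interfaces so hand-written methods can be added in the same package without touching generated files. Since this vendored package defines none, a consumer typically layers helpers over the typed interfaces instead; a minimal sketch (helper name and usage are illustrative, not part of the vendored API):

package example

import (
	"context"

	v1alpha1client "github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// shootExists is a consumer-side convenience helper over the generated
// ShootInterface; the empty ShootExpansion interface is where such helpers
// would live if they were added inside the generated package instead.
func shootExists(ctx context.Context, shoots v1alpha1client.ShootInterface, name string) (bool, error) {
	_, err := shoots.Get(ctx, name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}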
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/plant.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/plant.go
new file mode 100644
index 0000000..2c5fb0c
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/plant.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// PlantsGetter has a method to return a PlantInterface.
+// A group's client should implement this interface.
+type PlantsGetter interface {
+ Plants(namespace string) PlantInterface
+}
+
+// PlantInterface has methods to work with Plant resources.
+type PlantInterface interface {
+ Create(ctx context.Context, plant *v1alpha1.Plant, opts v1.CreateOptions) (*v1alpha1.Plant, error)
+ Update(ctx context.Context, plant *v1alpha1.Plant, opts v1.UpdateOptions) (*v1alpha1.Plant, error)
+ UpdateStatus(ctx context.Context, plant *v1alpha1.Plant, opts v1.UpdateOptions) (*v1alpha1.Plant, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Plant, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PlantList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Plant, err error)
+ PlantExpansion
+}
+
+// plants implements PlantInterface
+type plants struct {
+ client rest.Interface
+ ns string
+}
+
+// newPlants returns a Plants
+func newPlants(c *CoreV1alpha1Client, namespace string) *plants {
+ return &plants{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the plant, and returns the corresponding plant object, and an error if there is any.
+func (c *plants) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Plant, err error) {
+ result = &v1alpha1.Plant{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("plants").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Plants that match those selectors.
+func (c *plants) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PlantList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.PlantList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("plants").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested plants.
+func (c *plants) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("plants").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a plant and creates it. Returns the server's representation of the plant, and an error, if there is any.
+func (c *plants) Create(ctx context.Context, plant *v1alpha1.Plant, opts v1.CreateOptions) (result *v1alpha1.Plant, err error) {
+ result = &v1alpha1.Plant{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("plants").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(plant).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a plant and updates it. Returns the server's representation of the plant, and an error, if there is any.
+func (c *plants) Update(ctx context.Context, plant *v1alpha1.Plant, opts v1.UpdateOptions) (result *v1alpha1.Plant, err error) {
+ result = &v1alpha1.Plant{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("plants").
+ Name(plant.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(plant).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *plants) UpdateStatus(ctx context.Context, plant *v1alpha1.Plant, opts v1.UpdateOptions) (result *v1alpha1.Plant, err error) {
+ result = &v1alpha1.Plant{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("plants").
+ Name(plant.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(plant).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the plant and deletes it. Returns an error if one occurs.
+func (c *plants) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("plants").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *plants) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("plants").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched plant.
+func (c *plants) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Plant, err error) {
+ result = &v1alpha1.Plant{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("plants").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
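Plants are namespaced, so every request above goes through Namespace(c.ns). A short sketch of List with a label selector and a server-side timeout; the label key and namespace are placeholders:

package example

import (
	"context"

	v1alpha1client "github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func listTestPlants(ctx context.Context, client v1alpha1client.PlantsGetter) error {
	timeout := int64(30) // mapped to the request timeout by the generated List above
	list, err := client.Plants("garden-dev").List(ctx, metav1.ListOptions{
		LabelSelector:  "example.gardener.cloud/purpose=test", // placeholder label
		TimeoutSeconds: &timeout,
	})
	if err != nil {
		return err
	}
	for i := range list.Items {
		_ = list.Items[i].Name // iterate without copying the structs
	}
	return nil
}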
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/project.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/project.go
new file mode 100644
index 0000000..a42bf15
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/project.go
@@ -0,0 +1,184 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ProjectsGetter has a method to return a ProjectInterface.
+// A group's client should implement this interface.
+type ProjectsGetter interface {
+ Projects() ProjectInterface
+}
+
+// ProjectInterface has methods to work with Project resources.
+type ProjectInterface interface {
+ Create(ctx context.Context, project *v1alpha1.Project, opts v1.CreateOptions) (*v1alpha1.Project, error)
+ Update(ctx context.Context, project *v1alpha1.Project, opts v1.UpdateOptions) (*v1alpha1.Project, error)
+ UpdateStatus(ctx context.Context, project *v1alpha1.Project, opts v1.UpdateOptions) (*v1alpha1.Project, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Project, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ProjectList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Project, err error)
+ ProjectExpansion
+}
+
+// projects implements ProjectInterface
+type projects struct {
+ client rest.Interface
+}
+
+// newProjects returns a Projects
+func newProjects(c *CoreV1alpha1Client) *projects {
+ return &projects{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the project, and returns the corresponding project object, and an error if there is any.
+func (c *projects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Project, err error) {
+ result = &v1alpha1.Project{}
+ err = c.client.Get().
+ Resource("projects").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Projects that match those selectors.
+func (c *projects) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ProjectList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ProjectList{}
+ err = c.client.Get().
+ Resource("projects").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested projects.
+func (c *projects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("projects").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a project and creates it. Returns the server's representation of the project, and an error, if there is any.
+func (c *projects) Create(ctx context.Context, project *v1alpha1.Project, opts v1.CreateOptions) (result *v1alpha1.Project, err error) {
+ result = &v1alpha1.Project{}
+ err = c.client.Post().
+ Resource("projects").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(project).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a project and updates it. Returns the server's representation of the project, and an error, if there is any.
+func (c *projects) Update(ctx context.Context, project *v1alpha1.Project, opts v1.UpdateOptions) (result *v1alpha1.Project, err error) {
+ result = &v1alpha1.Project{}
+ err = c.client.Put().
+ Resource("projects").
+ Name(project.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(project).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *projects) UpdateStatus(ctx context.Context, project *v1alpha1.Project, opts v1.UpdateOptions) (result *v1alpha1.Project, err error) {
+ result = &v1alpha1.Project{}
+ err = c.client.Put().
+ Resource("projects").
+ Name(project.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(project).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the project and deletes it. Returns an error if one occurs.
+func (c *projects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("projects").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *projects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("projects").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched project.
+func (c *projects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Project, err error) {
+ result = &v1alpha1.Project{}
+ err = c.client.Patch(pt).
+ Resource("projects").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
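Unlike Plants, Projects are cluster-scoped: the getter takes no namespace and the generated requests omit the Namespace path segment, yielding paths like /apis/core.gardener.cloud/v1alpha1/projects/<name>. A sketch, with "dev" as a placeholder project name:

package example

import (
	"context"
	"fmt"

	v1alpha1client "github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func printProject(ctx context.Context, client v1alpha1client.ProjectsGetter) error {
	// No namespace argument for cluster-scoped resources.
	project, err := client.Projects().Get(ctx, "dev", metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Println("got project:", project.Name)
	return nil
}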
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/quota.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/quota.go
new file mode 100644
index 0000000..1bd530e
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/quota.go
@@ -0,0 +1,178 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// QuotasGetter has a method to return a QuotaInterface.
+// A group's client should implement this interface.
+type QuotasGetter interface {
+ Quotas(namespace string) QuotaInterface
+}
+
+// QuotaInterface has methods to work with Quota resources.
+type QuotaInterface interface {
+ Create(ctx context.Context, quota *v1alpha1.Quota, opts v1.CreateOptions) (*v1alpha1.Quota, error)
+ Update(ctx context.Context, quota *v1alpha1.Quota, opts v1.UpdateOptions) (*v1alpha1.Quota, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Quota, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.QuotaList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Quota, err error)
+ QuotaExpansion
+}
+
+// quotas implements QuotaInterface
+type quotas struct {
+ client rest.Interface
+ ns string
+}
+
+// newQuotas returns a Quotas
+func newQuotas(c *CoreV1alpha1Client, namespace string) *quotas {
+ return "as{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the quota, and returns the corresponding quota object, and an error if there is any.
+func (c *quotas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Quota, err error) {
+ result = &v1alpha1.Quota{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("quotas").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Quotas that match those selectors.
+func (c *quotas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.QuotaList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.QuotaList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("quotas").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested quotas.
+func (c *quotas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("quotas").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a quota and creates it. Returns the server's representation of the quota, and an error, if there is any.
+func (c *quotas) Create(ctx context.Context, quota *v1alpha1.Quota, opts v1.CreateOptions) (result *v1alpha1.Quota, err error) {
+ result = &v1alpha1.Quota{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("quotas").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(quota).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a quota and updates it. Returns the server's representation of the quota, and an error, if there is any.
+func (c *quotas) Update(ctx context.Context, quota *v1alpha1.Quota, opts v1.UpdateOptions) (result *v1alpha1.Quota, err error) {
+ result = &v1alpha1.Quota{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("quotas").
+ Name(quota.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(quota).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the quota and deletes it. Returns an error if one occurs.
+func (c *quotas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("quotas").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *quotas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("quotas").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched quota.
+func (c *quotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Quota, err error) {
+ result = &v1alpha1.Quota{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("quotas").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
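DeleteCollection combines DeleteOptions for the individual deletions with ListOptions to select which objects to remove. A sketch deleting quotas by label; the label and namespace are placeholders:

package example

import (
	"context"

	v1alpha1client "github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func cleanupQuotas(ctx context.Context, client v1alpha1client.QuotasGetter) error {
	// ListOptions chooses the objects, DeleteOptions applies to each deletion.
	return client.Quotas("garden-dev").DeleteCollection(ctx,
		metav1.DeleteOptions{},
		metav1.ListOptions{LabelSelector: "example.gardener.cloud/cleanup=true"},
	)
}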
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/secretbinding.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/secretbinding.go
new file mode 100644
index 0000000..0798e05
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/secretbinding.go
@@ -0,0 +1,178 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// SecretBindingsGetter has a method to return a SecretBindingInterface.
+// A group's client should implement this interface.
+type SecretBindingsGetter interface {
+ SecretBindings(namespace string) SecretBindingInterface
+}
+
+// SecretBindingInterface has methods to work with SecretBinding resources.
+type SecretBindingInterface interface {
+ Create(ctx context.Context, secretBinding *v1alpha1.SecretBinding, opts v1.CreateOptions) (*v1alpha1.SecretBinding, error)
+ Update(ctx context.Context, secretBinding *v1alpha1.SecretBinding, opts v1.UpdateOptions) (*v1alpha1.SecretBinding, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.SecretBinding, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.SecretBindingList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.SecretBinding, err error)
+ SecretBindingExpansion
+}
+
+// secretBindings implements SecretBindingInterface
+type secretBindings struct {
+ client rest.Interface
+ ns string
+}
+
+// newSecretBindings returns a SecretBindings
+func newSecretBindings(c *CoreV1alpha1Client, namespace string) *secretBindings {
+ return &secretBindings{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the secretBinding, and returns the corresponding secretBinding object, and an error if there is any.
+func (c *secretBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.SecretBinding, err error) {
+ result = &v1alpha1.SecretBinding{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of SecretBindings that match those selectors.
+func (c *secretBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.SecretBindingList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.SecretBindingList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested secretBindings.
+func (c *secretBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a secretBinding and creates it. Returns the server's representation of the secretBinding, and an error, if there is any.
+func (c *secretBindings) Create(ctx context.Context, secretBinding *v1alpha1.SecretBinding, opts v1.CreateOptions) (result *v1alpha1.SecretBinding, err error) {
+ result = &v1alpha1.SecretBinding{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(secretBinding).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a secretBinding and updates it. Returns the server's representation of the secretBinding, and an error, if there is any.
+func (c *secretBindings) Update(ctx context.Context, secretBinding *v1alpha1.SecretBinding, opts v1.UpdateOptions) (result *v1alpha1.SecretBinding, err error) {
+ result = &v1alpha1.SecretBinding{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ Name(secretBinding.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(secretBinding).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the secretBinding and deletes it. Returns an error if one occurs.
+func (c *secretBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *secretBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched secretBinding.
+func (c *secretBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.SecretBinding, err error) {
+ result = &v1alpha1.SecretBinding{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("secretbindings").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
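Delete forwards the DeleteOptions in the request body, so cascading behaviour can be set per call. A sketch using foreground propagation; the binding name and namespace are placeholders:

package example

import (
	"context"

	v1alpha1client "github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func deleteSecretBinding(ctx context.Context, client v1alpha1client.SecretBindingsGetter) error {
	foreground := metav1.DeletePropagationForeground
	return client.SecretBindings("garden-dev").Delete(ctx, "my-binding", metav1.DeleteOptions{
		// Foreground propagation keeps the object until its dependents are gone.
		PropagationPolicy: &foreground,
	})
}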
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/seed.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/seed.go
new file mode 100644
index 0000000..97dee93
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/seed.go
@@ -0,0 +1,184 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// SeedsGetter has a method to return a SeedInterface.
+// A group's client should implement this interface.
+type SeedsGetter interface {
+ Seeds() SeedInterface
+}
+
+// SeedInterface has methods to work with Seed resources.
+type SeedInterface interface {
+ Create(ctx context.Context, seed *v1alpha1.Seed, opts v1.CreateOptions) (*v1alpha1.Seed, error)
+ Update(ctx context.Context, seed *v1alpha1.Seed, opts v1.UpdateOptions) (*v1alpha1.Seed, error)
+ UpdateStatus(ctx context.Context, seed *v1alpha1.Seed, opts v1.UpdateOptions) (*v1alpha1.Seed, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Seed, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.SeedList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Seed, err error)
+ SeedExpansion
+}
+
+// seeds implements SeedInterface
+type seeds struct {
+ client rest.Interface
+}
+
+// newSeeds returns a Seeds
+func newSeeds(c *CoreV1alpha1Client) *seeds {
+ return &seeds{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the seed, and returns the corresponding seed object, and an error if there is any.
+func (c *seeds) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Seed, err error) {
+ result = &v1alpha1.Seed{}
+ err = c.client.Get().
+ Resource("seeds").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Seeds that match those selectors.
+func (c *seeds) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.SeedList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.SeedList{}
+ err = c.client.Get().
+ Resource("seeds").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested seeds.
+func (c *seeds) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("seeds").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a seed and creates it. Returns the server's representation of the seed, and an error, if there is any.
+func (c *seeds) Create(ctx context.Context, seed *v1alpha1.Seed, opts v1.CreateOptions) (result *v1alpha1.Seed, err error) {
+ result = &v1alpha1.Seed{}
+ err = c.client.Post().
+ Resource("seeds").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(seed).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a seed and updates it. Returns the server's representation of the seed, and an error, if there is any.
+func (c *seeds) Update(ctx context.Context, seed *v1alpha1.Seed, opts v1.UpdateOptions) (result *v1alpha1.Seed, err error) {
+ result = &v1alpha1.Seed{}
+ err = c.client.Put().
+ Resource("seeds").
+ Name(seed.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(seed).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *seeds) UpdateStatus(ctx context.Context, seed *v1alpha1.Seed, opts v1.UpdateOptions) (result *v1alpha1.Seed, err error) {
+ result = &v1alpha1.Seed{}
+ err = c.client.Put().
+ Resource("seeds").
+ Name(seed.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(seed).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the seed and deletes it. Returns an error if one occurs.
+func (c *seeds) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("seeds").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *seeds) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("seeds").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched seed.
+func (c *seeds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Seed, err error) {
+ result = &v1alpha1.Seed{}
+ err = c.client.Patch(pt).
+ Resource("seeds").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
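UpdateStatus writes only the status subresource (PUT .../seeds/<name>/status), which matters when spec and status are owned by different controllers. A sketch of the usual read-modify-write pattern; the seed name is a placeholder:

package example

import (
	"context"

	v1alpha1client "github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func touchSeedStatus(ctx context.Context, client v1alpha1client.SeedsGetter) error {
	seed, err := client.Seeds().Get(ctx, "aws-eu1", metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Mutate status fields here; non-status changes sent through the status
	// subresource are ignored by the API server.
	_, err = client.Seeds().UpdateStatus(ctx, seed, metav1.UpdateOptions{})
	return err
}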
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shoot.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shoot.go
new file mode 100644
index 0000000..5ff105c
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shoot.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ShootsGetter has a method to return a ShootInterface.
+// A group's client should implement this interface.
+type ShootsGetter interface {
+ Shoots(namespace string) ShootInterface
+}
+
+// ShootInterface has methods to work with Shoot resources.
+type ShootInterface interface {
+ Create(ctx context.Context, shoot *v1alpha1.Shoot, opts v1.CreateOptions) (*v1alpha1.Shoot, error)
+ Update(ctx context.Context, shoot *v1alpha1.Shoot, opts v1.UpdateOptions) (*v1alpha1.Shoot, error)
+ UpdateStatus(ctx context.Context, shoot *v1alpha1.Shoot, opts v1.UpdateOptions) (*v1alpha1.Shoot, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Shoot, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ShootList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Shoot, err error)
+ ShootExpansion
+}
+
+// shoots implements ShootInterface
+type shoots struct {
+ client rest.Interface
+ ns string
+}
+
+// newShoots returns a Shoots
+func newShoots(c *CoreV1alpha1Client, namespace string) *shoots {
+ return &shoots{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the shoot, and returns the corresponding shoot object, and an error if there is any.
+func (c *shoots) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Shoot, err error) {
+ result = &v1alpha1.Shoot{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("shoots").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Shoots that match those selectors.
+func (c *shoots) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ShootList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ShootList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("shoots").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested shoots.
+func (c *shoots) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("shoots").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a shoot and creates it. Returns the server's representation of the shoot, and an error, if there is any.
+func (c *shoots) Create(ctx context.Context, shoot *v1alpha1.Shoot, opts v1.CreateOptions) (result *v1alpha1.Shoot, err error) {
+ result = &v1alpha1.Shoot{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("shoots").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(shoot).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a shoot and updates it. Returns the server's representation of the shoot, and an error, if there is any.
+func (c *shoots) Update(ctx context.Context, shoot *v1alpha1.Shoot, opts v1.UpdateOptions) (result *v1alpha1.Shoot, err error) {
+ result = &v1alpha1.Shoot{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("shoots").
+ Name(shoot.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(shoot).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *shoots) UpdateStatus(ctx context.Context, shoot *v1alpha1.Shoot, opts v1.UpdateOptions) (result *v1alpha1.Shoot, err error) {
+ result = &v1alpha1.Shoot{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("shoots").
+ Name(shoot.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(shoot).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the shoot and deletes it. Returns an error if one occurs.
+func (c *shoots) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("shoots").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *shoots) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("shoots").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched shoot.
+func (c *shoots) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Shoot, err error) {
+ result = &v1alpha1.Shoot{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("shoots").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
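Patch takes an explicit types.PatchType plus raw bytes, letting callers skip the read-modify-write cycle. A sketch using a JSON merge patch to set an annotation; the annotation key, shoot name, and namespace are illustrative:

package example

import (
	"context"

	v1alpha1client "github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func annotateShoot(ctx context.Context, client v1alpha1client.ShootsGetter) error {
	patch := []byte(`{"metadata":{"annotations":{"example.gardener.cloud/touched-by":"docs"}}}`)
	_, err := client.Shoots("garden-dev").Patch(ctx, "my-shoot",
		types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}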
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shootstate.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shootstate.go
new file mode 100644
index 0000000..2a28e0a
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shootstate.go
@@ -0,0 +1,178 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ShootStatesGetter has a method to return a ShootStateInterface.
+// A group's client should implement this interface.
+type ShootStatesGetter interface {
+ ShootStates(namespace string) ShootStateInterface
+}
+
+// ShootStateInterface has methods to work with ShootState resources.
+type ShootStateInterface interface {
+ Create(ctx context.Context, shootState *v1alpha1.ShootState, opts v1.CreateOptions) (*v1alpha1.ShootState, error)
+ Update(ctx context.Context, shootState *v1alpha1.ShootState, opts v1.UpdateOptions) (*v1alpha1.ShootState, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ShootState, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ShootStateList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ShootState, err error)
+ ShootStateExpansion
+}
+
+// shootStates implements ShootStateInterface
+type shootStates struct {
+ client rest.Interface
+ ns string
+}
+
+// newShootStates returns a ShootStates
+func newShootStates(c *CoreV1alpha1Client, namespace string) *shootStates {
+ return &shootStates{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the shootState, and returns the corresponding shootState object, and an error if there is any.
+func (c *shootStates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ShootState, err error) {
+ result = &v1alpha1.ShootState{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("shootstates").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ShootStates that match those selectors.
+func (c *shootStates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ShootStateList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ShootStateList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("shootstates").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested shootStates.
+func (c *shootStates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("shootstates").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a shootState and creates it. Returns the server's representation of the shootState, and an error, if there is any.
+func (c *shootStates) Create(ctx context.Context, shootState *v1alpha1.ShootState, opts v1.CreateOptions) (result *v1alpha1.ShootState, err error) {
+ result = &v1alpha1.ShootState{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("shootstates").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(shootState).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a shootState and updates it. Returns the server's representation of the shootState, and an error, if there is any.
+func (c *shootStates) Update(ctx context.Context, shootState *v1alpha1.ShootState, opts v1.UpdateOptions) (result *v1alpha1.ShootState, err error) {
+ result = &v1alpha1.ShootState{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("shootstates").
+ Name(shootState.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(shootState).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the shootState and deletes it. Returns an error if one occurs.
+func (c *shootStates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("shootstates").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *shootStates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("shootstates").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched shootState.
+func (c *shootStates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ShootState, err error) {
+ result = &v1alpha1.ShootState{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("shootstates").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
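Watch flips opts.Watch and returns a watch.Interface whose channel delivers typed events. A sketch of a bounded watch loop; the namespace is a placeholder and the timeout is illustrative:

package example

import (
	"context"
	"fmt"

	v1alpha1client "github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func watchShootStates(ctx context.Context, client v1alpha1client.ShootStatesGetter) error {
	timeout := int64(60) // the server closes the watch after this many seconds
	w, err := client.ShootStates("garden-dev").Watch(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		fmt.Printf("%s: %T\n", event.Type, event.Object)
	}
	return nil
}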
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupbucket.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupbucket.go
new file mode 100644
index 0000000..a7238c9
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupbucket.go
@@ -0,0 +1,184 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// BackupBucketsGetter has a method to return a BackupBucketInterface.
+// A group's client should implement this interface.
+type BackupBucketsGetter interface {
+ BackupBuckets() BackupBucketInterface
+}
+
+// BackupBucketInterface has methods to work with BackupBucket resources.
+type BackupBucketInterface interface {
+ Create(ctx context.Context, backupBucket *v1beta1.BackupBucket, opts v1.CreateOptions) (*v1beta1.BackupBucket, error)
+ Update(ctx context.Context, backupBucket *v1beta1.BackupBucket, opts v1.UpdateOptions) (*v1beta1.BackupBucket, error)
+ UpdateStatus(ctx context.Context, backupBucket *v1beta1.BackupBucket, opts v1.UpdateOptions) (*v1beta1.BackupBucket, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.BackupBucket, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.BackupBucketList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.BackupBucket, err error)
+ BackupBucketExpansion
+}
+
+// backupBuckets implements BackupBucketInterface
+type backupBuckets struct {
+ client rest.Interface
+}
+
+// newBackupBuckets returns a BackupBuckets
+func newBackupBuckets(c *CoreV1beta1Client) *backupBuckets {
+ return &backupBuckets{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the backupBucket, and returns the corresponding backupBucket object, and an error if there is any.
+func (c *backupBuckets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.BackupBucket, err error) {
+ result = &v1beta1.BackupBucket{}
+ err = c.client.Get().
+ Resource("backupbuckets").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of BackupBuckets that match those selectors.
+func (c *backupBuckets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.BackupBucketList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.BackupBucketList{}
+ err = c.client.Get().
+ Resource("backupbuckets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested backupBuckets.
+func (c *backupBuckets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("backupbuckets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a backupBucket and creates it. Returns the server's representation of the backupBucket, and an error, if there is any.
+func (c *backupBuckets) Create(ctx context.Context, backupBucket *v1beta1.BackupBucket, opts v1.CreateOptions) (result *v1beta1.BackupBucket, err error) {
+ result = &v1beta1.BackupBucket{}
+ err = c.client.Post().
+ Resource("backupbuckets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backupBucket).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a backupBucket and updates it. Returns the server's representation of the backupBucket, and an error, if there is any.
+func (c *backupBuckets) Update(ctx context.Context, backupBucket *v1beta1.BackupBucket, opts v1.UpdateOptions) (result *v1beta1.BackupBucket, err error) {
+ result = &v1beta1.BackupBucket{}
+ err = c.client.Put().
+ Resource("backupbuckets").
+ Name(backupBucket.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backupBucket).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *backupBuckets) UpdateStatus(ctx context.Context, backupBucket *v1beta1.BackupBucket, opts v1.UpdateOptions) (result *v1beta1.BackupBucket, err error) {
+ result = &v1beta1.BackupBucket{}
+ err = c.client.Put().
+ Resource("backupbuckets").
+ Name(backupBucket.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backupBucket).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the backupBucket and deletes it. Returns an error if one occurs.
+func (c *backupBuckets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("backupbuckets").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *backupBuckets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("backupbuckets").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched backupBucket.
+func (c *backupBuckets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.BackupBucket, err error) {
+ result = &v1beta1.BackupBucket{}
+ err = c.client.Patch(pt).
+ Resource("backupbuckets").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupentry.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupentry.go
new file mode 100644
index 0000000..13880f0
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupentry.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// BackupEntriesGetter has a method to return a BackupEntryInterface.
+// A group's client should implement this interface.
+type BackupEntriesGetter interface {
+ BackupEntries(namespace string) BackupEntryInterface
+}
+
+// BackupEntryInterface has methods to work with BackupEntry resources.
+type BackupEntryInterface interface {
+ Create(ctx context.Context, backupEntry *v1beta1.BackupEntry, opts v1.CreateOptions) (*v1beta1.BackupEntry, error)
+ Update(ctx context.Context, backupEntry *v1beta1.BackupEntry, opts v1.UpdateOptions) (*v1beta1.BackupEntry, error)
+ UpdateStatus(ctx context.Context, backupEntry *v1beta1.BackupEntry, opts v1.UpdateOptions) (*v1beta1.BackupEntry, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.BackupEntry, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.BackupEntryList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.BackupEntry, err error)
+ BackupEntryExpansion
+}
+
+// backupEntries implements BackupEntryInterface
+type backupEntries struct {
+ client rest.Interface
+ ns string
+}
+
+// newBackupEntries returns a BackupEntries
+func newBackupEntries(c *CoreV1beta1Client, namespace string) *backupEntries {
+ return &backupEntries{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the backupEntry, and returns the corresponding backupEntry object, and an error if there is any.
+func (c *backupEntries) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.BackupEntry, err error) {
+ result = &v1beta1.BackupEntry{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("backupentries").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of BackupEntries that match those selectors.
+func (c *backupEntries) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.BackupEntryList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.BackupEntryList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("backupentries").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested backupEntries.
+func (c *backupEntries) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("backupentries").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a backupEntry and creates it. Returns the server's representation of the backupEntry, and an error, if there is any.
+func (c *backupEntries) Create(ctx context.Context, backupEntry *v1beta1.BackupEntry, opts v1.CreateOptions) (result *v1beta1.BackupEntry, err error) {
+ result = &v1beta1.BackupEntry{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("backupentries").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backupEntry).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a backupEntry and updates it. Returns the server's representation of the backupEntry, and an error, if there is any.
+func (c *backupEntries) Update(ctx context.Context, backupEntry *v1beta1.BackupEntry, opts v1.UpdateOptions) (result *v1beta1.BackupEntry, err error) {
+ result = &v1beta1.BackupEntry{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("backupentries").
+ Name(backupEntry.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backupEntry).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *backupEntries) UpdateStatus(ctx context.Context, backupEntry *v1beta1.BackupEntry, opts v1.UpdateOptions) (result *v1beta1.BackupEntry, err error) {
+ result = &v1beta1.BackupEntry{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("backupentries").
+ Name(backupEntry.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backupEntry).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the backupEntry and deletes it. Returns an error if one occurs.
+func (c *backupEntries) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("backupentries").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *backupEntries) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("backupentries").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched backupEntry.
+func (c *backupEntries) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.BackupEntry, err error) {
+ result = &v1beta1.BackupEntry{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("backupentries").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/cloudprofile.go
new file mode 100644
index 0000000..37b7cab
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/cloudprofile.go
@@ -0,0 +1,168 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CloudProfilesGetter has a method to return a CloudProfileInterface.
+// A group's client should implement this interface.
+type CloudProfilesGetter interface {
+ CloudProfiles() CloudProfileInterface
+}
+
+// CloudProfileInterface has methods to work with CloudProfile resources.
+type CloudProfileInterface interface {
+ Create(ctx context.Context, cloudProfile *v1beta1.CloudProfile, opts v1.CreateOptions) (*v1beta1.CloudProfile, error)
+ Update(ctx context.Context, cloudProfile *v1beta1.CloudProfile, opts v1.UpdateOptions) (*v1beta1.CloudProfile, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CloudProfile, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CloudProfileList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CloudProfile, err error)
+ CloudProfileExpansion
+}
+
+// cloudProfiles implements CloudProfileInterface
+type cloudProfiles struct {
+ client rest.Interface
+}
+
+// newCloudProfiles returns a CloudProfiles
+func newCloudProfiles(c *CoreV1beta1Client) *cloudProfiles {
+ return &cloudProfiles{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the cloudProfile, and returns the corresponding cloudProfile object, and an error if there is any.
+func (c *cloudProfiles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CloudProfile, err error) {
+ result = &v1beta1.CloudProfile{}
+ err = c.client.Get().
+ Resource("cloudprofiles").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CloudProfiles that match those selectors.
+func (c *cloudProfiles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CloudProfileList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.CloudProfileList{}
+ err = c.client.Get().
+ Resource("cloudprofiles").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested cloudProfiles.
+func (c *cloudProfiles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("cloudprofiles").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a cloudProfile and creates it. Returns the server's representation of the cloudProfile, and an error, if there is any.
+func (c *cloudProfiles) Create(ctx context.Context, cloudProfile *v1beta1.CloudProfile, opts v1.CreateOptions) (result *v1beta1.CloudProfile, err error) {
+ result = &v1beta1.CloudProfile{}
+ err = c.client.Post().
+ Resource("cloudprofiles").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(cloudProfile).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a cloudProfile and updates it. Returns the server's representation of the cloudProfile, and an error, if there is any.
+func (c *cloudProfiles) Update(ctx context.Context, cloudProfile *v1beta1.CloudProfile, opts v1.UpdateOptions) (result *v1beta1.CloudProfile, err error) {
+ result = &v1beta1.CloudProfile{}
+ err = c.client.Put().
+ Resource("cloudprofiles").
+ Name(cloudProfile.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(cloudProfile).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the cloudProfile and deletes it. Returns an error if one occurs.
+func (c *cloudProfiles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("cloudprofiles").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *cloudProfiles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("cloudprofiles").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched cloudProfile.
+func (c *cloudProfiles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CloudProfile, err error) {
+ result = &v1beta1.CloudProfile{}
+ err = c.client.Patch(pt).
+ Resource("cloudprofiles").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerinstallation.go
new file mode 100644
index 0000000..5756e84
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerinstallation.go
@@ -0,0 +1,184 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ControllerInstallationsGetter has a method to return a ControllerInstallationInterface.
+// A group's client should implement this interface.
+type ControllerInstallationsGetter interface {
+ ControllerInstallations() ControllerInstallationInterface
+}
+
+// ControllerInstallationInterface has methods to work with ControllerInstallation resources.
+type ControllerInstallationInterface interface {
+ Create(ctx context.Context, controllerInstallation *v1beta1.ControllerInstallation, opts v1.CreateOptions) (*v1beta1.ControllerInstallation, error)
+ Update(ctx context.Context, controllerInstallation *v1beta1.ControllerInstallation, opts v1.UpdateOptions) (*v1beta1.ControllerInstallation, error)
+ UpdateStatus(ctx context.Context, controllerInstallation *v1beta1.ControllerInstallation, opts v1.UpdateOptions) (*v1beta1.ControllerInstallation, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ControllerInstallation, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ControllerInstallationList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerInstallation, err error)
+ ControllerInstallationExpansion
+}
+
+// controllerInstallations implements ControllerInstallationInterface
+type controllerInstallations struct {
+ client rest.Interface
+}
+
+// newControllerInstallations returns a ControllerInstallations
+func newControllerInstallations(c *CoreV1beta1Client) *controllerInstallations {
+ return &controllerInstallations{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the controllerInstallation, and returns the corresponding controllerInstallation object, and an error if there is any.
+func (c *controllerInstallations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerInstallation, err error) {
+ result = &v1beta1.ControllerInstallation{}
+ err = c.client.Get().
+ Resource("controllerinstallations").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ControllerInstallations that match those selectors.
+func (c *controllerInstallations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerInstallationList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.ControllerInstallationList{}
+ err = c.client.Get().
+ Resource("controllerinstallations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested controllerInstallations.
+func (c *controllerInstallations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("controllerinstallations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a controllerInstallation and creates it. Returns the server's representation of the controllerInstallation, and an error, if there is any.
+func (c *controllerInstallations) Create(ctx context.Context, controllerInstallation *v1beta1.ControllerInstallation, opts v1.CreateOptions) (result *v1beta1.ControllerInstallation, err error) {
+ result = &v1beta1.ControllerInstallation{}
+ err = c.client.Post().
+ Resource("controllerinstallations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(controllerInstallation).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a controllerInstallation and updates it. Returns the server's representation of the controllerInstallation, and an error, if there is any.
+func (c *controllerInstallations) Update(ctx context.Context, controllerInstallation *v1beta1.ControllerInstallation, opts v1.UpdateOptions) (result *v1beta1.ControllerInstallation, err error) {
+ result = &v1beta1.ControllerInstallation{}
+ err = c.client.Put().
+ Resource("controllerinstallations").
+ Name(controllerInstallation.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(controllerInstallation).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *controllerInstallations) UpdateStatus(ctx context.Context, controllerInstallation *v1beta1.ControllerInstallation, opts v1.UpdateOptions) (result *v1beta1.ControllerInstallation, err error) {
+ result = &v1beta1.ControllerInstallation{}
+ err = c.client.Put().
+ Resource("controllerinstallations").
+ Name(controllerInstallation.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(controllerInstallation).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the controllerInstallation and deletes it. Returns an error if one occurs.
+func (c *controllerInstallations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("controllerinstallations").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *controllerInstallations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("controllerinstallations").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched controllerInstallation.
+func (c *controllerInstallations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerInstallation, err error) {
+ result = &v1beta1.ControllerInstallation{}
+ err = c.client.Patch(pt).
+ Resource("controllerinstallations").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerregistration.go
new file mode 100644
index 0000000..c7f66b2
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerregistration.go
@@ -0,0 +1,168 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ControllerRegistrationsGetter has a method to return a ControllerRegistrationInterface.
+// A group's client should implement this interface.
+type ControllerRegistrationsGetter interface {
+ ControllerRegistrations() ControllerRegistrationInterface
+}
+
+// ControllerRegistrationInterface has methods to work with ControllerRegistration resources.
+type ControllerRegistrationInterface interface {
+ Create(ctx context.Context, controllerRegistration *v1beta1.ControllerRegistration, opts v1.CreateOptions) (*v1beta1.ControllerRegistration, error)
+ Update(ctx context.Context, controllerRegistration *v1beta1.ControllerRegistration, opts v1.UpdateOptions) (*v1beta1.ControllerRegistration, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ControllerRegistration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ControllerRegistrationList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRegistration, err error)
+ ControllerRegistrationExpansion
+}
+
+// controllerRegistrations implements ControllerRegistrationInterface
+type controllerRegistrations struct {
+ client rest.Interface
+}
+
+// newControllerRegistrations returns a ControllerRegistrations
+func newControllerRegistrations(c *CoreV1beta1Client) *controllerRegistrations {
+ return &controllerRegistrations{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the controllerRegistration, and returns the corresponding controllerRegistration object, and an error if there is any.
+func (c *controllerRegistrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRegistration, err error) {
+ result = &v1beta1.ControllerRegistration{}
+ err = c.client.Get().
+ Resource("controllerregistrations").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ControllerRegistrations that match those selectors.
+func (c *controllerRegistrations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRegistrationList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.ControllerRegistrationList{}
+ err = c.client.Get().
+ Resource("controllerregistrations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested controllerRegistrations.
+func (c *controllerRegistrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("controllerregistrations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a controllerRegistration and creates it. Returns the server's representation of the controllerRegistration, and an error, if there is any.
+func (c *controllerRegistrations) Create(ctx context.Context, controllerRegistration *v1beta1.ControllerRegistration, opts v1.CreateOptions) (result *v1beta1.ControllerRegistration, err error) {
+ result = &v1beta1.ControllerRegistration{}
+ err = c.client.Post().
+ Resource("controllerregistrations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(controllerRegistration).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a controllerRegistration and updates it. Returns the server's representation of the controllerRegistration, and an error, if there is any.
+func (c *controllerRegistrations) Update(ctx context.Context, controllerRegistration *v1beta1.ControllerRegistration, opts v1.UpdateOptions) (result *v1beta1.ControllerRegistration, err error) {
+ result = &v1beta1.ControllerRegistration{}
+ err = c.client.Put().
+ Resource("controllerregistrations").
+ Name(controllerRegistration.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(controllerRegistration).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the controllerRegistration and deletes it. Returns an error if one occurs.
+func (c *controllerRegistrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("controllerregistrations").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *controllerRegistrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("controllerregistrations").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched controllerRegistration.
+func (c *controllerRegistrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRegistration, err error) {
+ result = &v1beta1.ControllerRegistration{}
+ err = c.client.Patch(pt).
+ Resource("controllerregistrations").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/core_client.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/core_client.go
new file mode 100644
index 0000000..01ada3d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/core_client.go
@@ -0,0 +1,139 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type CoreV1beta1Interface interface {
+ RESTClient() rest.Interface
+ BackupBucketsGetter
+ BackupEntriesGetter
+ CloudProfilesGetter
+ ControllerInstallationsGetter
+ ControllerRegistrationsGetter
+ PlantsGetter
+ ProjectsGetter
+ QuotasGetter
+ SecretBindingsGetter
+ SeedsGetter
+ ShootsGetter
+}
+
+// CoreV1beta1Client is used to interact with features provided by the core.gardener.cloud group.
+type CoreV1beta1Client struct {
+ restClient rest.Interface
+}
+
+func (c *CoreV1beta1Client) BackupBuckets() BackupBucketInterface {
+ return newBackupBuckets(c)
+}
+
+func (c *CoreV1beta1Client) BackupEntries(namespace string) BackupEntryInterface {
+ return newBackupEntries(c, namespace)
+}
+
+func (c *CoreV1beta1Client) CloudProfiles() CloudProfileInterface {
+ return newCloudProfiles(c)
+}
+
+func (c *CoreV1beta1Client) ControllerInstallations() ControllerInstallationInterface {
+ return newControllerInstallations(c)
+}
+
+func (c *CoreV1beta1Client) ControllerRegistrations() ControllerRegistrationInterface {
+ return newControllerRegistrations(c)
+}
+
+func (c *CoreV1beta1Client) Plants(namespace string) PlantInterface {
+ return newPlants(c, namespace)
+}
+
+func (c *CoreV1beta1Client) Projects() ProjectInterface {
+ return newProjects(c)
+}
+
+func (c *CoreV1beta1Client) Quotas(namespace string) QuotaInterface {
+ return newQuotas(c, namespace)
+}
+
+func (c *CoreV1beta1Client) SecretBindings(namespace string) SecretBindingInterface {
+ return newSecretBindings(c, namespace)
+}
+
+func (c *CoreV1beta1Client) Seeds() SeedInterface {
+ return newSeeds(c)
+}
+
+func (c *CoreV1beta1Client) Shoots(namespace string) ShootInterface {
+ return newShoots(c, namespace)
+}
+
+// NewForConfig creates a new CoreV1beta1Client for the given config.
+func NewForConfig(c *rest.Config) (*CoreV1beta1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &CoreV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new CoreV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *CoreV1beta1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new CoreV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *CoreV1beta1Client {
+ return &CoreV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1beta1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *CoreV1beta1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/doc.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/doc.go
new file mode 100644
index 0000000..11866df
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/generated_expansion.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/generated_expansion.go
new file mode 100644
index 0000000..6ca0f94
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/generated_expansion.go
@@ -0,0 +1,41 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type BackupBucketExpansion interface{}
+
+type BackupEntryExpansion interface{}
+
+type CloudProfileExpansion interface{}
+
+type ControllerInstallationExpansion interface{}
+
+type ControllerRegistrationExpansion interface{}
+
+type PlantExpansion interface{}
+
+type ProjectExpansion interface{}
+
+type QuotaExpansion interface{}
+
+type SecretBindingExpansion interface{}
+
+type SeedExpansion interface{}
+
+type ShootExpansion interface{}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/plant.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/plant.go
new file mode 100644
index 0000000..1beb236
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/plant.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// PlantsGetter has a method to return a PlantInterface.
+// A group's client should implement this interface.
+type PlantsGetter interface {
+ Plants(namespace string) PlantInterface
+}
+
+// PlantInterface has methods to work with Plant resources.
+type PlantInterface interface {
+ Create(ctx context.Context, plant *v1beta1.Plant, opts v1.CreateOptions) (*v1beta1.Plant, error)
+ Update(ctx context.Context, plant *v1beta1.Plant, opts v1.UpdateOptions) (*v1beta1.Plant, error)
+ UpdateStatus(ctx context.Context, plant *v1beta1.Plant, opts v1.UpdateOptions) (*v1beta1.Plant, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Plant, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PlantList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Plant, err error)
+ PlantExpansion
+}
+
+// plants implements PlantInterface
+type plants struct {
+ client rest.Interface
+ ns string
+}
+
+// newPlants returns a Plants
+func newPlants(c *CoreV1beta1Client, namespace string) *plants {
+ return &plants{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the plant, and returns the corresponding plant object, and an error if there is any.
+func (c *plants) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Plant, err error) {
+ result = &v1beta1.Plant{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("plants").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Plants that match those selectors.
+func (c *plants) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PlantList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.PlantList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("plants").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested plants.
+func (c *plants) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("plants").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a plant and creates it. Returns the server's representation of the plant, and an error, if there is any.
+func (c *plants) Create(ctx context.Context, plant *v1beta1.Plant, opts v1.CreateOptions) (result *v1beta1.Plant, err error) {
+ result = &v1beta1.Plant{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("plants").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(plant).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a plant and updates it. Returns the server's representation of the plant, and an error, if there is any.
+func (c *plants) Update(ctx context.Context, plant *v1beta1.Plant, opts v1.UpdateOptions) (result *v1beta1.Plant, err error) {
+ result = &v1beta1.Plant{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("plants").
+ Name(plant.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(plant).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *plants) UpdateStatus(ctx context.Context, plant *v1beta1.Plant, opts v1.UpdateOptions) (result *v1beta1.Plant, err error) {
+ result = &v1beta1.Plant{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("plants").
+ Name(plant.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(plant).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the plant and deletes it. Returns an error if one occurs.
+func (c *plants) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("plants").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *plants) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("plants").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched plant.
+func (c *plants) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Plant, err error) {
+ result = &v1beta1.Plant{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("plants").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/project.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/project.go
new file mode 100644
index 0000000..b0d08d4
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/project.go
@@ -0,0 +1,184 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ProjectsGetter has a method to return a ProjectInterface.
+// A group's client should implement this interface.
+type ProjectsGetter interface {
+ Projects() ProjectInterface
+}
+
+// ProjectInterface has methods to work with Project resources.
+type ProjectInterface interface {
+ Create(ctx context.Context, project *v1beta1.Project, opts v1.CreateOptions) (*v1beta1.Project, error)
+ Update(ctx context.Context, project *v1beta1.Project, opts v1.UpdateOptions) (*v1beta1.Project, error)
+ UpdateStatus(ctx context.Context, project *v1beta1.Project, opts v1.UpdateOptions) (*v1beta1.Project, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Project, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ProjectList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Project, err error)
+ ProjectExpansion
+}
+
+// projects implements ProjectInterface
+type projects struct {
+ client rest.Interface
+}
+
+// newProjects returns a Projects
+func newProjects(c *CoreV1beta1Client) *projects {
+ return &projects{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the project, and returns the corresponding project object, and an error if there is any.
+func (c *projects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Project, err error) {
+ result = &v1beta1.Project{}
+ err = c.client.Get().
+ Resource("projects").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Projects that match those selectors.
+func (c *projects) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ProjectList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.ProjectList{}
+ err = c.client.Get().
+ Resource("projects").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested projects.
+func (c *projects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("projects").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a project and creates it. Returns the server's representation of the project, and an error, if there is any.
+func (c *projects) Create(ctx context.Context, project *v1beta1.Project, opts v1.CreateOptions) (result *v1beta1.Project, err error) {
+ result = &v1beta1.Project{}
+ err = c.client.Post().
+ Resource("projects").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(project).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a project and updates it. Returns the server's representation of the project, and an error, if there is any.
+func (c *projects) Update(ctx context.Context, project *v1beta1.Project, opts v1.UpdateOptions) (result *v1beta1.Project, err error) {
+ result = &v1beta1.Project{}
+ err = c.client.Put().
+ Resource("projects").
+ Name(project.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(project).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *projects) UpdateStatus(ctx context.Context, project *v1beta1.Project, opts v1.UpdateOptions) (result *v1beta1.Project, err error) {
+ result = &v1beta1.Project{}
+ err = c.client.Put().
+ Resource("projects").
+ Name(project.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(project).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the project and deletes it. Returns an error if one occurs.
+func (c *projects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("projects").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *projects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("projects").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched project.
+func (c *projects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Project, err error) {
+ result = &v1beta1.Project{}
+ err = c.client.Patch(pt).
+ Resource("projects").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/quota.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/quota.go
new file mode 100644
index 0000000..73aef2b
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/quota.go
@@ -0,0 +1,178 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// QuotasGetter has a method to return a QuotaInterface.
+// A group's client should implement this interface.
+type QuotasGetter interface {
+ Quotas(namespace string) QuotaInterface
+}
+
+// QuotaInterface has methods to work with Quota resources.
+type QuotaInterface interface {
+ Create(ctx context.Context, quota *v1beta1.Quota, opts v1.CreateOptions) (*v1beta1.Quota, error)
+ Update(ctx context.Context, quota *v1beta1.Quota, opts v1.UpdateOptions) (*v1beta1.Quota, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Quota, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.QuotaList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Quota, err error)
+ QuotaExpansion
+}
+
+// quotas implements QuotaInterface
+type quotas struct {
+ client rest.Interface
+ ns string
+}
+
+// newQuotas returns a Quotas
+func newQuotas(c *CoreV1beta1Client, namespace string) *quotas {
+	return &quotas{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the quota, and returns the corresponding quota object, and an error if there is any.
+func (c *quotas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Quota, err error) {
+ result = &v1beta1.Quota{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("quotas").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Quotas that match those selectors.
+func (c *quotas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.QuotaList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.QuotaList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("quotas").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested quotas.
+func (c *quotas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("quotas").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a quota and creates it. Returns the server's representation of the quota, and an error, if there is any.
+func (c *quotas) Create(ctx context.Context, quota *v1beta1.Quota, opts v1.CreateOptions) (result *v1beta1.Quota, err error) {
+ result = &v1beta1.Quota{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("quotas").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(quota).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a quota and updates it. Returns the server's representation of the quota, and an error, if there is any.
+func (c *quotas) Update(ctx context.Context, quota *v1beta1.Quota, opts v1.UpdateOptions) (result *v1beta1.Quota, err error) {
+ result = &v1beta1.Quota{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("quotas").
+ Name(quota.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(quota).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the quota and deletes it. Returns an error if one occurs.
+func (c *quotas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("quotas").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *quotas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("quotas").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched quota.
+func (c *quotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Quota, err error) {
+ result = &v1beta1.Quota{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("quotas").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/secretbinding.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/secretbinding.go
new file mode 100644
index 0000000..953846e
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/secretbinding.go
@@ -0,0 +1,178 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// SecretBindingsGetter has a method to return a SecretBindingInterface.
+// A group's client should implement this interface.
+type SecretBindingsGetter interface {
+ SecretBindings(namespace string) SecretBindingInterface
+}
+
+// SecretBindingInterface has methods to work with SecretBinding resources.
+type SecretBindingInterface interface {
+ Create(ctx context.Context, secretBinding *v1beta1.SecretBinding, opts v1.CreateOptions) (*v1beta1.SecretBinding, error)
+ Update(ctx context.Context, secretBinding *v1beta1.SecretBinding, opts v1.UpdateOptions) (*v1beta1.SecretBinding, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.SecretBinding, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.SecretBindingList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.SecretBinding, err error)
+ SecretBindingExpansion
+}
+
+// secretBindings implements SecretBindingInterface
+type secretBindings struct {
+ client rest.Interface
+ ns string
+}
+
+// newSecretBindings returns a SecretBindings
+func newSecretBindings(c *CoreV1beta1Client, namespace string) *secretBindings {
+ return &secretBindings{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the secretBinding, and returns the corresponding secretBinding object, and an error if there is any.
+func (c *secretBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.SecretBinding, err error) {
+ result = &v1beta1.SecretBinding{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of SecretBindings that match those selectors.
+func (c *secretBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.SecretBindingList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.SecretBindingList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested secretBindings.
+func (c *secretBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a secretBinding and creates it. Returns the server's representation of the secretBinding, and an error, if there is any.
+func (c *secretBindings) Create(ctx context.Context, secretBinding *v1beta1.SecretBinding, opts v1.CreateOptions) (result *v1beta1.SecretBinding, err error) {
+ result = &v1beta1.SecretBinding{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(secretBinding).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a secretBinding and updates it. Returns the server's representation of the secretBinding, and an error, if there is any.
+func (c *secretBindings) Update(ctx context.Context, secretBinding *v1beta1.SecretBinding, opts v1.UpdateOptions) (result *v1beta1.SecretBinding, err error) {
+ result = &v1beta1.SecretBinding{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ Name(secretBinding.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(secretBinding).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the secretBinding and deletes it. Returns an error if one occurs.
+func (c *secretBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *secretBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("secretbindings").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched secretBinding.
+func (c *secretBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.SecretBinding, err error) {
+ result = &v1beta1.SecretBinding{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("secretbindings").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/seed.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/seed.go
new file mode 100644
index 0000000..05b1df2
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/seed.go
@@ -0,0 +1,184 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// SeedsGetter has a method to return a SeedInterface.
+// A group's client should implement this interface.
+type SeedsGetter interface {
+ Seeds() SeedInterface
+}
+
+// SeedInterface has methods to work with Seed resources.
+type SeedInterface interface {
+ Create(ctx context.Context, seed *v1beta1.Seed, opts v1.CreateOptions) (*v1beta1.Seed, error)
+ Update(ctx context.Context, seed *v1beta1.Seed, opts v1.UpdateOptions) (*v1beta1.Seed, error)
+ UpdateStatus(ctx context.Context, seed *v1beta1.Seed, opts v1.UpdateOptions) (*v1beta1.Seed, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Seed, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.SeedList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Seed, err error)
+ SeedExpansion
+}
+
+// seeds implements SeedInterface
+type seeds struct {
+ client rest.Interface
+}
+
+// newSeeds returns a Seeds
+func newSeeds(c *CoreV1beta1Client) *seeds {
+ return &seeds{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the seed, and returns the corresponding seed object, and an error if there is any.
+func (c *seeds) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Seed, err error) {
+ result = &v1beta1.Seed{}
+ err = c.client.Get().
+ Resource("seeds").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Seeds that match those selectors.
+func (c *seeds) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.SeedList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.SeedList{}
+ err = c.client.Get().
+ Resource("seeds").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested seeds.
+func (c *seeds) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("seeds").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a seed and creates it. Returns the server's representation of the seed, and an error, if there is any.
+func (c *seeds) Create(ctx context.Context, seed *v1beta1.Seed, opts v1.CreateOptions) (result *v1beta1.Seed, err error) {
+ result = &v1beta1.Seed{}
+ err = c.client.Post().
+ Resource("seeds").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(seed).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a seed and updates it. Returns the server's representation of the seed, and an error, if there is any.
+func (c *seeds) Update(ctx context.Context, seed *v1beta1.Seed, opts v1.UpdateOptions) (result *v1beta1.Seed, err error) {
+ result = &v1beta1.Seed{}
+ err = c.client.Put().
+ Resource("seeds").
+ Name(seed.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(seed).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *seeds) UpdateStatus(ctx context.Context, seed *v1beta1.Seed, opts v1.UpdateOptions) (result *v1beta1.Seed, err error) {
+ result = &v1beta1.Seed{}
+ err = c.client.Put().
+ Resource("seeds").
+ Name(seed.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(seed).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the seed and deletes it. Returns an error if one occurs.
+func (c *seeds) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("seeds").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *seeds) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("seeds").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched seed.
+func (c *seeds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Seed, err error) {
+ result = &v1beta1.Seed{}
+ err = c.client.Patch(pt).
+ Resource("seeds").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/shoot.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/shoot.go
new file mode 100644
index 0000000..b9c904c
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/shoot.go
@@ -0,0 +1,195 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ShootsGetter has a method to return a ShootInterface.
+// A group's client should implement this interface.
+type ShootsGetter interface {
+ Shoots(namespace string) ShootInterface
+}
+
+// ShootInterface has methods to work with Shoot resources.
+type ShootInterface interface {
+ Create(ctx context.Context, shoot *v1beta1.Shoot, opts v1.CreateOptions) (*v1beta1.Shoot, error)
+ Update(ctx context.Context, shoot *v1beta1.Shoot, opts v1.UpdateOptions) (*v1beta1.Shoot, error)
+ UpdateStatus(ctx context.Context, shoot *v1beta1.Shoot, opts v1.UpdateOptions) (*v1beta1.Shoot, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Shoot, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ShootList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Shoot, err error)
+ ShootExpansion
+}
+
+// shoots implements ShootInterface
+type shoots struct {
+ client rest.Interface
+ ns string
+}
+
+// newShoots returns a Shoots
+func newShoots(c *CoreV1beta1Client, namespace string) *shoots {
+ return &shoots{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the shoot, and returns the corresponding shoot object, and an error if there is any.
+func (c *shoots) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Shoot, err error) {
+ result = &v1beta1.Shoot{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("shoots").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Shoots that match those selectors.
+func (c *shoots) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ShootList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.ShootList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("shoots").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested shoots.
+func (c *shoots) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("shoots").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a shoot and creates it. Returns the server's representation of the shoot, and an error, if there is any.
+func (c *shoots) Create(ctx context.Context, shoot *v1beta1.Shoot, opts v1.CreateOptions) (result *v1beta1.Shoot, err error) {
+ result = &v1beta1.Shoot{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("shoots").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(shoot).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a shoot and updates it. Returns the server's representation of the shoot, and an error, if there is any.
+func (c *shoots) Update(ctx context.Context, shoot *v1beta1.Shoot, opts v1.UpdateOptions) (result *v1beta1.Shoot, err error) {
+ result = &v1beta1.Shoot{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("shoots").
+ Name(shoot.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(shoot).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *shoots) UpdateStatus(ctx context.Context, shoot *v1beta1.Shoot, opts v1.UpdateOptions) (result *v1beta1.Shoot, err error) {
+ result = &v1beta1.Shoot{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("shoots").
+ Name(shoot.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(shoot).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the shoot and deletes it. Returns an error if one occurs.
+func (c *shoots) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("shoots").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *shoots) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("shoots").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched shoot.
+func (c *shoots) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Shoot, err error) {
+ result = &v1beta1.Shoot{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("shoots").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/backupbucket.go b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/backupbucket.go
new file mode 100644
index 0000000..31ac02f
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/backupbucket.go
@@ -0,0 +1,68 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// BackupBucketLister helps list BackupBuckets.
+// All objects returned here must be treated as read-only.
+type BackupBucketLister interface {
+ // List lists all BackupBuckets in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.BackupBucket, err error)
+ // Get retrieves the BackupBucket from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.BackupBucket, error)
+ BackupBucketListerExpansion
+}
+
+// backupBucketLister implements the BackupBucketLister interface.
+type backupBucketLister struct {
+ indexer cache.Indexer
+}
+
+// NewBackupBucketLister returns a new BackupBucketLister.
+func NewBackupBucketLister(indexer cache.Indexer) BackupBucketLister {
+ return &backupBucketLister{indexer: indexer}
+}
+
+// List lists all BackupBuckets in the indexer.
+func (s *backupBucketLister) List(selector labels.Selector) (ret []*v1beta1.BackupBucket, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.BackupBucket))
+ })
+ return ret, err
+}
+
+// Get retrieves the BackupBucket from the index for a given name.
+func (s *backupBucketLister) Get(name string) (*v1beta1.BackupBucket, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("backupbucket"), name)
+ }
+ return obj.(*v1beta1.BackupBucket), nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/backupentry.go b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/backupentry.go
new file mode 100644
index 0000000..8ff2963
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/backupentry.go
@@ -0,0 +1,99 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// BackupEntryLister helps list BackupEntries.
+// All objects returned here must be treated as read-only.
+type BackupEntryLister interface {
+ // List lists all BackupEntries in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.BackupEntry, err error)
+ // BackupEntries returns an object that can list and get BackupEntries.
+ BackupEntries(namespace string) BackupEntryNamespaceLister
+ BackupEntryListerExpansion
+}
+
+// backupEntryLister implements the BackupEntryLister interface.
+type backupEntryLister struct {
+ indexer cache.Indexer
+}
+
+// NewBackupEntryLister returns a new BackupEntryLister.
+func NewBackupEntryLister(indexer cache.Indexer) BackupEntryLister {
+ return &backupEntryLister{indexer: indexer}
+}
+
+// List lists all BackupEntries in the indexer.
+func (s *backupEntryLister) List(selector labels.Selector) (ret []*v1beta1.BackupEntry, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.BackupEntry))
+ })
+ return ret, err
+}
+
+// BackupEntries returns an object that can list and get BackupEntries.
+func (s *backupEntryLister) BackupEntries(namespace string) BackupEntryNamespaceLister {
+ return backupEntryNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// BackupEntryNamespaceLister helps list and get BackupEntries.
+// All objects returned here must be treated as read-only.
+type BackupEntryNamespaceLister interface {
+ // List lists all BackupEntries in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.BackupEntry, err error)
+ // Get retrieves the BackupEntry from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.BackupEntry, error)
+ BackupEntryNamespaceListerExpansion
+}
+
+// backupEntryNamespaceLister implements the BackupEntryNamespaceLister
+// interface.
+type backupEntryNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all BackupEntries in the indexer for a given namespace.
+func (s backupEntryNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.BackupEntry, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.BackupEntry))
+ })
+ return ret, err
+}
+
+// Get retrieves the BackupEntry from the indexer for a given namespace and name.
+func (s backupEntryNamespaceLister) Get(name string) (*v1beta1.BackupEntry, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("backupentry"), name)
+ }
+ return obj.(*v1beta1.BackupEntry), nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/cloudprofile.go
new file mode 100644
index 0000000..2769565
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/cloudprofile.go
@@ -0,0 +1,68 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// CloudProfileLister helps list CloudProfiles.
+// All objects returned here must be treated as read-only.
+type CloudProfileLister interface {
+ // List lists all CloudProfiles in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.CloudProfile, err error)
+ // Get retrieves the CloudProfile from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.CloudProfile, error)
+ CloudProfileListerExpansion
+}
+
+// cloudProfileLister implements the CloudProfileLister interface.
+type cloudProfileLister struct {
+ indexer cache.Indexer
+}
+
+// NewCloudProfileLister returns a new CloudProfileLister.
+func NewCloudProfileLister(indexer cache.Indexer) CloudProfileLister {
+ return &cloudProfileLister{indexer: indexer}
+}
+
+// List lists all CloudProfiles in the indexer.
+func (s *cloudProfileLister) List(selector labels.Selector) (ret []*v1beta1.CloudProfile, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.CloudProfile))
+ })
+ return ret, err
+}
+
+// Get retrieves the CloudProfile from the index for a given name.
+func (s *cloudProfileLister) Get(name string) (*v1beta1.CloudProfile, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("cloudprofile"), name)
+ }
+ return obj.(*v1beta1.CloudProfile), nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/controllerinstallation.go
new file mode 100644
index 0000000..c165901
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/controllerinstallation.go
@@ -0,0 +1,68 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ControllerInstallationLister helps list ControllerInstallations.
+// All objects returned here must be treated as read-only.
+type ControllerInstallationLister interface {
+ // List lists all ControllerInstallations in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.ControllerInstallation, err error)
+ // Get retrieves the ControllerInstallation from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.ControllerInstallation, error)
+ ControllerInstallationListerExpansion
+}
+
+// controllerInstallationLister implements the ControllerInstallationLister interface.
+type controllerInstallationLister struct {
+ indexer cache.Indexer
+}
+
+// NewControllerInstallationLister returns a new ControllerInstallationLister.
+func NewControllerInstallationLister(indexer cache.Indexer) ControllerInstallationLister {
+ return &controllerInstallationLister{indexer: indexer}
+}
+
+// List lists all ControllerInstallations in the indexer.
+func (s *controllerInstallationLister) List(selector labels.Selector) (ret []*v1beta1.ControllerInstallation, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.ControllerInstallation))
+ })
+ return ret, err
+}
+
+// Get retrieves the ControllerInstallation from the index for a given name.
+func (s *controllerInstallationLister) Get(name string) (*v1beta1.ControllerInstallation, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("controllerinstallation"), name)
+ }
+ return obj.(*v1beta1.ControllerInstallation), nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/controllerregistration.go
new file mode 100644
index 0000000..3462b34
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/controllerregistration.go
@@ -0,0 +1,68 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ControllerRegistrationLister helps list ControllerRegistrations.
+// All objects returned here must be treated as read-only.
+type ControllerRegistrationLister interface {
+ // List lists all ControllerRegistrations in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.ControllerRegistration, err error)
+ // Get retrieves the ControllerRegistration from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.ControllerRegistration, error)
+ ControllerRegistrationListerExpansion
+}
+
+// controllerRegistrationLister implements the ControllerRegistrationLister interface.
+type controllerRegistrationLister struct {
+ indexer cache.Indexer
+}
+
+// NewControllerRegistrationLister returns a new ControllerRegistrationLister.
+func NewControllerRegistrationLister(indexer cache.Indexer) ControllerRegistrationLister {
+ return &controllerRegistrationLister{indexer: indexer}
+}
+
+// List lists all ControllerRegistrations in the indexer.
+func (s *controllerRegistrationLister) List(selector labels.Selector) (ret []*v1beta1.ControllerRegistration, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.ControllerRegistration))
+ })
+ return ret, err
+}
+
+// Get retrieves the ControllerRegistration from the index for a given name.
+func (s *controllerRegistrationLister) Get(name string) (*v1beta1.ControllerRegistration, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("controllerregistration"), name)
+ }
+ return obj.(*v1beta1.ControllerRegistration), nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/expansion_generated.go b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/expansion_generated.go
new file mode 100644
index 0000000..102799a
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/expansion_generated.go
@@ -0,0 +1,83 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+// BackupBucketListerExpansion allows custom methods to be added to
+// BackupBucketLister.
+type BackupBucketListerExpansion interface{}
+
+// BackupEntryListerExpansion allows custom methods to be added to
+// BackupEntryLister.
+type BackupEntryListerExpansion interface{}
+
+// BackupEntryNamespaceListerExpansion allows custom methods to be added to
+// BackupEntryNamespaceLister.
+type BackupEntryNamespaceListerExpansion interface{}
+
+// CloudProfileListerExpansion allows custom methods to be added to
+// CloudProfileLister.
+type CloudProfileListerExpansion interface{}
+
+// ControllerInstallationListerExpansion allows custom methods to be added to
+// ControllerInstallationLister.
+type ControllerInstallationListerExpansion interface{}
+
+// ControllerRegistrationListerExpansion allows custom methods to be added to
+// ControllerRegistrationLister.
+type ControllerRegistrationListerExpansion interface{}
+
+// PlantListerExpansion allows custom methods to be added to
+// PlantLister.
+type PlantListerExpansion interface{}
+
+// PlantNamespaceListerExpansion allows custom methods to be added to
+// PlantNamespaceLister.
+type PlantNamespaceListerExpansion interface{}
+
+// ProjectListerExpansion allows custom methods to be added to
+// ProjectLister.
+type ProjectListerExpansion interface{}
+
+// QuotaListerExpansion allows custom methods to be added to
+// QuotaLister.
+type QuotaListerExpansion interface{}
+
+// QuotaNamespaceListerExpansion allows custom methods to be added to
+// QuotaNamespaceLister.
+type QuotaNamespaceListerExpansion interface{}
+
+// SecretBindingListerExpansion allows custom methods to be added to
+// SecretBindingLister.
+type SecretBindingListerExpansion interface{}
+
+// SecretBindingNamespaceListerExpansion allows custom methods to be added to
+// SecretBindingNamespaceLister.
+type SecretBindingNamespaceListerExpansion interface{}
+
+// SeedListerExpansion allows custom methods to be added to
+// SeedLister.
+type SeedListerExpansion interface{}
+
+// ShootListerExpansion allows custom methods to be added to
+// ShootLister.
+type ShootListerExpansion interface{}
+
+// ShootNamespaceListerExpansion allows custom methods to be added to
+// ShootNamespaceLister.
+type ShootNamespaceListerExpansion interface{}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/plant.go b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/plant.go
new file mode 100644
index 0000000..f0b959f
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/plant.go
@@ -0,0 +1,99 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// PlantLister helps list Plants.
+// All objects returned here must be treated as read-only.
+type PlantLister interface {
+ // List lists all Plants in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.Plant, err error)
+ // Plants returns an object that can list and get Plants.
+ Plants(namespace string) PlantNamespaceLister
+ PlantListerExpansion
+}
+
+// plantLister implements the PlantLister interface.
+type plantLister struct {
+ indexer cache.Indexer
+}
+
+// NewPlantLister returns a new PlantLister.
+func NewPlantLister(indexer cache.Indexer) PlantLister {
+ return &plantLister{indexer: indexer}
+}
+
+// List lists all Plants in the indexer.
+func (s *plantLister) List(selector labels.Selector) (ret []*v1beta1.Plant, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.Plant))
+ })
+ return ret, err
+}
+
+// Plants returns an object that can list and get Plants.
+func (s *plantLister) Plants(namespace string) PlantNamespaceLister {
+ return plantNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// PlantNamespaceLister helps list and get Plants.
+// All objects returned here must be treated as read-only.
+type PlantNamespaceLister interface {
+ // List lists all Plants in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.Plant, err error)
+ // Get retrieves the Plant from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.Plant, error)
+ PlantNamespaceListerExpansion
+}
+
+// plantNamespaceLister implements the PlantNamespaceLister
+// interface.
+type plantNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all Plants in the indexer for a given namespace.
+func (s plantNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Plant, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.Plant))
+ })
+ return ret, err
+}
+
+// Get retrieves the Plant from the indexer for a given namespace and name.
+func (s plantNamespaceLister) Get(name string) (*v1beta1.Plant, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("plant"), name)
+ }
+ return obj.(*v1beta1.Plant), nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/project.go b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/project.go
new file mode 100644
index 0000000..df4c2f3
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/project.go
@@ -0,0 +1,68 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ProjectLister helps list Projects.
+// All objects returned here must be treated as read-only.
+type ProjectLister interface {
+ // List lists all Projects in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.Project, err error)
+ // Get retrieves the Project from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.Project, error)
+ ProjectListerExpansion
+}
+
+// projectLister implements the ProjectLister interface.
+type projectLister struct {
+ indexer cache.Indexer
+}
+
+// NewProjectLister returns a new ProjectLister.
+func NewProjectLister(indexer cache.Indexer) ProjectLister {
+ return &projectLister{indexer: indexer}
+}
+
+// List lists all Projects in the indexer.
+func (s *projectLister) List(selector labels.Selector) (ret []*v1beta1.Project, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.Project))
+ })
+ return ret, err
+}
+
+// Get retrieves the Project from the index for a given name.
+func (s *projectLister) Get(name string) (*v1beta1.Project, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("project"), name)
+ }
+ return obj.(*v1beta1.Project), nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/quota.go b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/quota.go
new file mode 100644
index 0000000..1459052
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/quota.go
@@ -0,0 +1,99 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// QuotaLister helps list Quotas.
+// All objects returned here must be treated as read-only.
+type QuotaLister interface {
+ // List lists all Quotas in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.Quota, err error)
+ // Quotas returns an object that can list and get Quotas.
+ Quotas(namespace string) QuotaNamespaceLister
+ QuotaListerExpansion
+}
+
+// quotaLister implements the QuotaLister interface.
+type quotaLister struct {
+ indexer cache.Indexer
+}
+
+// NewQuotaLister returns a new QuotaLister.
+func NewQuotaLister(indexer cache.Indexer) QuotaLister {
+ return "aLister{indexer: indexer}
+}
+
+// List lists all Quotas in the indexer.
+func (s *quotaLister) List(selector labels.Selector) (ret []*v1beta1.Quota, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.Quota))
+ })
+ return ret, err
+}
+
+// Quotas returns an object that can list and get Quotas.
+func (s *quotaLister) Quotas(namespace string) QuotaNamespaceLister {
+ return quotaNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// QuotaNamespaceLister helps list and get Quotas.
+// All objects returned here must be treated as read-only.
+type QuotaNamespaceLister interface {
+ // List lists all Quotas in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.Quota, err error)
+ // Get retrieves the Quota from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.Quota, error)
+ QuotaNamespaceListerExpansion
+}
+
+// quotaNamespaceLister implements the QuotaNamespaceLister
+// interface.
+type quotaNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all Quotas in the indexer for a given namespace.
+func (s quotaNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Quota, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.Quota))
+ })
+ return ret, err
+}
+
+// Get retrieves the Quota from the indexer for a given namespace and name.
+func (s quotaNamespaceLister) Get(name string) (*v1beta1.Quota, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("quota"), name)
+ }
+ return obj.(*v1beta1.Quota), nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/secretbinding.go b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/secretbinding.go
new file mode 100644
index 0000000..d9c1922
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/secretbinding.go
@@ -0,0 +1,99 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// SecretBindingLister helps list SecretBindings.
+// All objects returned here must be treated as read-only.
+type SecretBindingLister interface {
+ // List lists all SecretBindings in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.SecretBinding, err error)
+ // SecretBindings returns an object that can list and get SecretBindings.
+ SecretBindings(namespace string) SecretBindingNamespaceLister
+ SecretBindingListerExpansion
+}
+
+// secretBindingLister implements the SecretBindingLister interface.
+type secretBindingLister struct {
+ indexer cache.Indexer
+}
+
+// NewSecretBindingLister returns a new SecretBindingLister.
+func NewSecretBindingLister(indexer cache.Indexer) SecretBindingLister {
+ return &secretBindingLister{indexer: indexer}
+}
+
+// List lists all SecretBindings in the indexer.
+func (s *secretBindingLister) List(selector labels.Selector) (ret []*v1beta1.SecretBinding, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.SecretBinding))
+ })
+ return ret, err
+}
+
+// SecretBindings returns an object that can list and get SecretBindings.
+func (s *secretBindingLister) SecretBindings(namespace string) SecretBindingNamespaceLister {
+ return secretBindingNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// SecretBindingNamespaceLister helps list and get SecretBindings.
+// All objects returned here must be treated as read-only.
+type SecretBindingNamespaceLister interface {
+ // List lists all SecretBindings in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.SecretBinding, err error)
+ // Get retrieves the SecretBinding from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.SecretBinding, error)
+ SecretBindingNamespaceListerExpansion
+}
+
+// secretBindingNamespaceLister implements the SecretBindingNamespaceLister
+// interface.
+type secretBindingNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all SecretBindings in the indexer for a given namespace.
+func (s secretBindingNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.SecretBinding, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.SecretBinding))
+ })
+ return ret, err
+}
+
+// Get retrieves the SecretBinding from the indexer for a given namespace and name.
+func (s secretBindingNamespaceLister) Get(name string) (*v1beta1.SecretBinding, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("secretbinding"), name)
+ }
+ return obj.(*v1beta1.SecretBinding), nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/seed.go b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/seed.go
new file mode 100644
index 0000000..a010836
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/seed.go
@@ -0,0 +1,68 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// SeedLister helps list Seeds.
+// All objects returned here must be treated as read-only.
+type SeedLister interface {
+ // List lists all Seeds in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.Seed, err error)
+ // Get retrieves the Seed from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.Seed, error)
+ SeedListerExpansion
+}
+
+// seedLister implements the SeedLister interface.
+type seedLister struct {
+ indexer cache.Indexer
+}
+
+// NewSeedLister returns a new SeedLister.
+func NewSeedLister(indexer cache.Indexer) SeedLister {
+ return &seedLister{indexer: indexer}
+}
+
+// List lists all Seeds in the indexer.
+func (s *seedLister) List(selector labels.Selector) (ret []*v1beta1.Seed, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.Seed))
+ })
+ return ret, err
+}
+
+// Get retrieves the Seed from the index for a given name.
+func (s *seedLister) Get(name string) (*v1beta1.Seed, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("seed"), name)
+ }
+ return obj.(*v1beta1.Seed), nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/shoot.go b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/shoot.go
new file mode 100644
index 0000000..9d42c6d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1/shoot.go
@@ -0,0 +1,99 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ShootLister helps list Shoots.
+// All objects returned here must be treated as read-only.
+type ShootLister interface {
+ // List lists all Shoots in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.Shoot, err error)
+ // Shoots returns an object that can list and get Shoots.
+ Shoots(namespace string) ShootNamespaceLister
+ ShootListerExpansion
+}
+
+// shootLister implements the ShootLister interface.
+type shootLister struct {
+ indexer cache.Indexer
+}
+
+// NewShootLister returns a new ShootLister.
+func NewShootLister(indexer cache.Indexer) ShootLister {
+ return &shootLister{indexer: indexer}
+}
+
+// List lists all Shoots in the indexer.
+func (s *shootLister) List(selector labels.Selector) (ret []*v1beta1.Shoot, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.Shoot))
+ })
+ return ret, err
+}
+
+// Shoots returns an object that can list and get Shoots.
+func (s *shootLister) Shoots(namespace string) ShootNamespaceLister {
+ return shootNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// ShootNamespaceLister helps list and get Shoots.
+// All objects returned here must be treated as read-only.
+type ShootNamespaceLister interface {
+ // List lists all Shoots in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.Shoot, err error)
+ // Get retrieves the Shoot from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.Shoot, error)
+ ShootNamespaceListerExpansion
+}
+
+// shootNamespaceLister implements the ShootNamespaceLister
+// interface.
+type shootNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all Shoots in the indexer for a given namespace.
+func (s shootNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Shoot, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.Shoot))
+ })
+ return ret, err
+}
+
+// Get retrieves the Shoot from the indexer for a given namespace and name.
+func (s shootNamespaceLister) Get(name string) (*v1beta1.Shoot, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("shoot"), name)
+ }
+ return obj.(*v1beta1.Shoot), nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/doc.go b/vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/doc.go
new file mode 100644
index 0000000..7d4fb77
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/register.go b/vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/register.go
new file mode 100644
index 0000000..22a1ace
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ extensionsv1alpha1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/admissionplugins.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/admissionplugins.go
new file mode 100644
index 0000000..0095d6d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/admissionplugins.go
@@ -0,0 +1,94 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "fmt"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+
+ "github.com/Masterminds/semver"
+)
+
+var (
+ defaultPlugins = []gardencorev1beta1.AdmissionPlugin{
+ {Name: "Priority"},
+ {Name: "NamespaceLifecycle"},
+ {Name: "LimitRanger"},
+ {Name: "PodSecurityPolicy"},
+ {Name: "ServiceAccount"},
+ {Name: "NodeRestriction"},
+ {Name: "DefaultStorageClass"},
+ {Name: "DefaultTolerationSeconds"},
+ {Name: "ResourceQuota"},
+ {Name: "StorageObjectInUseProtection"},
+ {Name: "MutatingAdmissionWebhook"},
+ {Name: "ValidatingAdmissionWebhook"},
+ }
+ defaultPluginsWithInitializers = append(defaultPlugins, gardencorev1beta1.AdmissionPlugin{Name: "Initializers"})
+
+ lowestSupportedKubernetesVersionMajorMinor = "1.10"
+ lowestSupportedKubernetesVersion, _ = semver.NewVersion(lowestSupportedKubernetesVersionMajorMinor)
+
+ admissionPlugins = map[string][]gardencorev1beta1.AdmissionPlugin{
+ "1.10": defaultPluginsWithInitializers,
+ "1.11": defaultPluginsWithInitializers,
+ "1.12": defaultPluginsWithInitializers,
+ "1.13": defaultPluginsWithInitializers,
+ "1.14": defaultPlugins,
+ }
+)
+
+// GetAdmissionPluginsForVersion returns the set of default admission plugins for the given Kubernetes version.
+// If the given Kubernetes version does not explicitly define admission plugins, the set for the next lower
+// available version is returned (e.g., if version X is not defined, the set for version X-1 is returned).
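+//
+// A minimal usage sketch (the version string below is a hypothetical input; since no "1.16" entry exists,
+// the "1.14" set is returned):
+//
+//  plugins := GetAdmissionPluginsForVersion("1.16.3")
+//  for _, plugin := range plugins {
+//      fmt.Println(plugin.Name)
+//  }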
+func GetAdmissionPluginsForVersion(v string) []gardencorev1beta1.AdmissionPlugin {
+ return copyPlugins(getAdmissionPluginsForVersionInternal(v))
+}
+
+func getAdmissionPluginsForVersionInternal(v string) []gardencorev1beta1.AdmissionPlugin {
+ version, err := semver.NewVersion(v)
+ if err != nil {
+ return admissionPlugins[lowestSupportedKubernetesVersionMajorMinor]
+ }
+
+ if version.LessThan(lowestSupportedKubernetesVersion) {
+ return admissionPlugins[lowestSupportedKubernetesVersionMajorMinor]
+ }
+
+ majorMinor := formatMajorMinor(version.Major(), version.Minor())
+ if pluginsForVersion, ok := admissionPlugins[majorMinor]; ok {
+ return pluginsForVersion
+ }
+
+ // We do not handle decrementing the major part of the version. The reason for this is that we would have to set
+ // the minor part to some higher value which we don't know (assume we go from 2.2->2.1->2.0->1.?). We decided not
+ // to handle decrementing the major part at all, as if Gardener supports Kubernetes 2.X (independent of the fact
+ // that it's anyway unclear when/whether that will come) many parts have to be adapted anyway.
+ return GetAdmissionPluginsForVersion(formatMajorMinor(version.Major(), version.Minor()-1))
+}
+
+func formatMajorMinor(major, minor int64) string {
+ return fmt.Sprintf("%d.%d", major, minor)
+}
+
+func copyPlugins(admissionPlugins []gardencorev1beta1.AdmissionPlugin) []gardencorev1beta1.AdmissionPlugin {
+ dst := make([]gardencorev1beta1.AdmissionPlugin, 0)
+ for _, plugin := range admissionPlugins {
+ pluginPointer := &plugin
+ dst = append(dst, *pluginPointer.DeepCopy())
+ }
+ return dst
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/applier.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/applier.go
new file mode 100644
index 0000000..1718e85
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/applier.go
@@ -0,0 +1,432 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+
+ utilerrors "github.com/gardener/gardener/pkg/utils/errors"
+
+ "github.com/hashicorp/go-multierror"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/util/yaml"
+ "k8s.io/client-go/rest"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// defaultApplier applies objects by retrieving their current state and then either creating or updating them
+// (updates happen with a predefined merge logic).
+type defaultApplier struct {
+ client client.Client
+ restMapper meta.RESTMapper
+}
+
+// NewApplier constructs a new Applier from the given client.
+func NewApplier(c client.Client, restMapper meta.RESTMapper) Applier {
+ return &defaultApplier{client: c, restMapper: restMapper}
+}
+
+// NewApplierForConfig creates a new Applier for the given rest.Config.
+// Use NewApplier if you already have a client and RESTMapper at hand, as this will create a new direct client.
+func NewApplierForConfig(config *rest.Config) (Applier, error) {
+ opts := client.Options{}
+
+ if err := setClientOptionsDefaults(config, &opts); err != nil {
+ return nil, err
+ }
+
+ c, err := NewDirectClient(config, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewApplier(c, opts.Mapper), nil
+}
+
+func (a *defaultApplier) applyObject(ctx context.Context, desired *unstructured.Unstructured, options MergeFuncs) error {
+ // look up scope of objects' kind to check, if we should default the namespace field
+ mapping, err := a.restMapper.RESTMapping(desired.GroupVersionKind().GroupKind(), desired.GroupVersionKind().Version)
+ if err != nil || mapping == nil {
+		// Don't reset the RESTMapper in case of cache misses. A miss most probably indicates that the corresponding CRD is not yet applied.
+		// The CRD might be applied later as part of the same chart.
+
+ // default namespace on a best effort basis
+ if desired.GetKind() != "Namespace" && desired.GetNamespace() == "" {
+ desired.SetNamespace(metav1.NamespaceDefault)
+ }
+ } else {
+ if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
+ // default namespace field to `default` in case of namespaced kinds
+ if desired.GetNamespace() == "" {
+ desired.SetNamespace(metav1.NamespaceDefault)
+ }
+ } else {
+ // unset namespace field in case of non-namespaced kinds
+ desired.SetNamespace("")
+ }
+ }
+
+ key := client.ObjectKeyFromObject(desired)
+ if len(key.Name) == 0 {
+ return fmt.Errorf("missing 'metadata.name' in: %+v", desired)
+ }
+
+ current := &unstructured.Unstructured{}
+ current.SetGroupVersionKind(desired.GroupVersionKind())
+ if err = a.client.Get(ctx, key, current); err != nil {
+ if apierrors.IsNotFound(err) {
+ return a.client.Create(ctx, desired)
+ }
+ return err
+ }
+
+ if err := a.mergeObjects(desired, current, options); err != nil {
+ return err
+ }
+
+ return a.client.Update(ctx, desired)
+}
+
+func (a *defaultApplier) deleteObject(ctx context.Context, desired *unstructured.Unstructured, opts *DeleteManifestOptions) error {
+ if desired.GetNamespace() == "" {
+ desired.SetNamespace(metav1.NamespaceDefault)
+ }
+ if len(desired.GetName()) == 0 {
+ return fmt.Errorf("missing 'metadata.name' in: %+v", desired)
+ }
+
+ err := a.client.Delete(ctx, desired)
+ if err != nil {
+ // this is kept for backwards compatibility.
+ if apierrors.IsNotFound(err) {
+ return nil
+ }
+
+ for _, tf := range opts.TolerateErrorFuncs {
+ if tf != nil && tf(err) {
+ return nil
+ }
+ }
+ }
+
+ return err
+}
+
+// DefaultMergeFuncs contains options for common k8s objects, e.g. Service, ServiceAccount.
+var (
+ DefaultMergeFuncs = MergeFuncs{
+ corev1.SchemeGroupVersion.WithKind("Service").GroupKind(): func(newObj, oldObj *unstructured.Unstructured) {
+ newSvcType, found, _ := unstructured.NestedString(newObj.Object, "spec", "type")
+ if !found {
+ newSvcType = string(corev1.ServiceTypeClusterIP)
+ _ = unstructured.SetNestedField(newObj.Object, newSvcType, "spec", "type")
+ }
+
+ oldSvcType, found, _ := unstructured.NestedString(oldObj.Object, "spec", "type")
+ if !found {
+ oldSvcType = string(corev1.ServiceTypeClusterIP)
+ }
+
+ switch newSvcType {
+ case string(corev1.ServiceTypeLoadBalancer), string(corev1.ServiceTypeNodePort):
+ oldPorts, found, _ := unstructured.NestedSlice(oldObj.Object, "spec", "ports")
+ if !found {
+					// no old ports probably means that the service was of type ExternalName before.
+ break
+ }
+
+ newPorts, found, _ := unstructured.NestedSlice(newObj.Object, "spec", "ports")
+ if !found {
+ // no new ports is safe to ignore
+ break
+ }
+
+ ports := make([]interface{}, 0, len(newPorts))
+ for _, newPort := range newPorts {
+ np := newPort.(map[string]interface{})
+ npName, _, _ := unstructured.NestedString(np, "name")
+ npPort, _ := nestedFloat64OrInt64(np, "port")
+ nodePort, ok := nestedFloat64OrInt64(np, "nodePort")
+
+ for _, oldPortObj := range oldPorts {
+ op := oldPortObj.(map[string]interface{})
+ opName, _, _ := unstructured.NestedString(op, "name")
+ opPort, _ := nestedFloat64OrInt64(op, "port")
+
+ if (opName == npName || opPort == npPort) && (!ok || nodePort == 0) {
+ np["nodePort"] = op["nodePort"]
+ }
+ }
+
+ ports = append(ports, np)
+ }
+
+ _ = unstructured.SetNestedSlice(newObj.Object, ports, "spec", "ports")
+
+ case string(corev1.ServiceTypeExternalName):
+ // there is no ClusterIP in this case
+ return
+ }
+
+			// ClusterIP is immutable unless the old service is of type ExternalName
+ if oldSvcType != string(corev1.ServiceTypeExternalName) {
+ newClusterIP, _, _ := unstructured.NestedString(newObj.Object, "spec", "clusterIP")
+ if newClusterIP != corev1.ClusterIPNone || newSvcType != string(corev1.ServiceTypeClusterIP) {
+ oldClusterIP, _, _ := unstructured.NestedString(oldObj.Object, "spec", "clusterIP")
+ _ = unstructured.SetNestedField(newObj.Object, oldClusterIP, "spec", "clusterIP")
+ }
+ }
+
+ newETP, _, _ := unstructured.NestedString(newObj.Object, "spec", "externalTrafficPolicy")
+ oldETP, _, _ := unstructured.NestedString(oldObj.Object, "spec", "externalTrafficPolicy")
+
+ if oldSvcType == string(corev1.ServiceTypeLoadBalancer) &&
+ newSvcType == string(corev1.ServiceTypeLoadBalancer) &&
+ newETP == string(corev1.ServiceExternalTrafficPolicyTypeLocal) &&
+ oldETP == string(corev1.ServiceExternalTrafficPolicyTypeLocal) {
+ newHealthCheckPort, _ := nestedFloat64OrInt64(newObj.Object, "spec", "healthCheckNodePort")
+ if newHealthCheckPort == 0 {
+ oldHealthCheckPort, _ := nestedFloat64OrInt64(oldObj.Object, "spec", "healthCheckNodePort")
+ _ = unstructured.SetNestedField(newObj.Object, oldHealthCheckPort, "spec", "healthCheckNodePort")
+ }
+ }
+
+ },
+ corev1.SchemeGroupVersion.WithKind("ServiceAccount").GroupKind(): func(newObj, oldObj *unstructured.Unstructured) {
+ // We do not want to overwrite a ServiceAccount's `.secrets[]` list or `.imagePullSecrets[]`.
+ newObj.Object["secrets"] = oldObj.Object["secrets"]
+ newObj.Object["imagePullSecrets"] = oldObj.Object["imagePullSecrets"]
+ },
+ {Group: "autoscaling.k8s.io", Kind: "VerticalPodAutoscaler"}: func(newObj, oldObj *unstructured.Unstructured) {
+ // Never override the status of VPA resources
+ newObj.Object["status"] = oldObj.Object["status"]
+ },
+ }
+
+ DeploymentKeepReplicasMergeFunc = MergeFunc(func(newObj, oldObj *unstructured.Unstructured) {
+ oldReplicas, ok := nestedFloat64OrInt64(oldObj.Object, "spec", "replicas")
+ if !ok {
+ return
+ }
+ _ = unstructured.SetNestedField(newObj.Object, oldReplicas, "spec", "replicas")
+ })
+)
+
+func nestedFloat64OrInt64(obj map[string]interface{}, fields ...string) (int64, bool) {
+ val, found, err := unstructured.NestedFieldNoCopy(obj, fields...)
+ if !found || err != nil {
+ return 0, found
+ }
+
+ f, ok := val.(float64)
+ if ok {
+ return int64(f), true
+ }
+
+ i, ok := val.(int64)
+ if ok {
+ return i, true
+ }
+
+ return 0, false
+}
+
+// CopyApplierOptions returns a copy of the provided applier options.
+func CopyApplierOptions(in MergeFuncs) MergeFuncs {
+ out := make(MergeFuncs, len(in))
+
+ for k, v := range in {
+ out[k] = v
+ }
+
+ return out
+}
+
+func (a *defaultApplier) mergeObjects(newObj, oldObj *unstructured.Unstructured, mergeFuncs MergeFuncs) error {
+ newObj.SetResourceVersion(oldObj.GetResourceVersion())
+
+ // We do not want to overwrite the Finalizers.
+ newObj.Object["metadata"].(map[string]interface{})["finalizers"] = oldObj.Object["metadata"].(map[string]interface{})["finalizers"]
+
+ if merge, ok := mergeFuncs[newObj.GroupVersionKind().GroupKind()]; ok {
+ merge(newObj, oldObj)
+ }
+
+ return nil
+}
+
+// ApplyManifest is a function which does the same as `kubectl apply -f`. It takes a bunch of manifests,
+// all concatenated in a byte slice, and sends them one after the other to the API server. If a resource
+// already exists at the API server, it will be updated. Errors are collected and returned after all manifests have been processed.
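+//
+// A possible usage sketch (manifestBytes is assumed to hold one or more YAML documents):
+//
+//  applier := NewApplier(c, mapper)
+//  if err := applier.ApplyManifest(ctx, NewManifestReader(manifestBytes), DefaultMergeFuncs); err != nil {
+//      return err
+//  }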
+func (a *defaultApplier) ApplyManifest(ctx context.Context, r UnstructuredReader, options MergeFuncs) error {
+ allErrs := &multierror.Error{
+ ErrorFormat: utilerrors.NewErrorFormatFuncWithPrefix("failed to apply manifests"),
+ }
+
+ for {
+ obj, err := r.Read()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ allErrs = multierror.Append(allErrs, fmt.Errorf("could not read object: %+v", err))
+ continue
+ }
+ if obj == nil {
+ continue
+ }
+
+ if err := a.applyObject(ctx, obj, options); err != nil {
+ allErrs = multierror.Append(allErrs, fmt.Errorf("could not apply object of kind %q \"%s/%s\": %+v", obj.GetKind(), obj.GetNamespace(), obj.GetName(), err))
+ continue
+ }
+ }
+
+ return allErrs.ErrorOrNil()
+}
+
+// DeleteManifest is a function which does the same as `kubectl delete -f`. It takes a bunch of manifests,
+// all concatenated in a byte slice, and sends them one after the other to the API server for deletion.
+// Errors are collected and returned after all manifests have been processed.
+func (a *defaultApplier) DeleteManifest(ctx context.Context, r UnstructuredReader, opts ...DeleteManifestOption) error {
+ allErrs := &multierror.Error{
+ ErrorFormat: utilerrors.NewErrorFormatFuncWithPrefix("failed to delete manifests"),
+ }
+
+ deleteOps := &DeleteManifestOptions{}
+
+ for _, o := range opts {
+ if o != nil {
+ o.MutateDeleteManifestOptions(deleteOps)
+ }
+ }
+
+ for {
+ obj, err := r.Read()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ allErrs = multierror.Append(allErrs, fmt.Errorf("could not read object: %+v", err))
+ continue
+ }
+ if obj == nil {
+ continue
+ }
+
+ if err := a.deleteObject(ctx, obj, deleteOps); err != nil {
+ allErrs = multierror.Append(allErrs, fmt.Errorf("could not delete object of kind %q \"%s/%s\": %+v", obj.GetKind(), obj.GetNamespace(), obj.GetName(), err))
+ continue
+ }
+ }
+
+ return allErrs.ErrorOrNil()
+}
+
+// UnstructuredReader is an interface that all manifest readers should implement
+type UnstructuredReader interface {
+ Read() (*unstructured.Unstructured, error)
+}
+
+// NewManifestReader initializes a reader for yaml manifests
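+//
+// A reader yields one object per Read call and io.EOF once the manifest is exhausted, e.g. (sketch):
+//
+//  r := NewManifestReader(manifestBytes)
+//  for {
+//      obj, err := r.Read()
+//      if err == io.EOF {
+//          break
+//      }
+//      // handle obj / err
+//  }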
+func NewManifestReader(manifest []byte) UnstructuredReader {
+ return &manifestReader{
+ decoder: yaml.NewYAMLOrJSONDecoder(bytes.NewReader(manifest), 1024),
+ manifest: manifest,
+ }
+}
+
+// manifestReader is an unstructured reader that contains a YAMLOrJSONDecoder and the raw manifest bytes
+type manifestReader struct {
+ decoder *yaml.YAMLOrJSONDecoder
+ manifest []byte
+}
+
+// Read decodes yaml data into an unstructured object
+func (m *manifestReader) Read() (*unstructured.Unstructured, error) {
+ // loop for skipping empty yaml objects
+ for {
+ var data map[string]interface{}
+
+ err := m.decoder.Decode(&data)
+ if err == io.EOF {
+ return nil, err
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error '%+v' decoding manifest: %s", err, string(m.manifest))
+ }
+ if data == nil {
+ continue
+ }
+
+ return &unstructured.Unstructured{Object: data}, nil
+ }
+}
+
+// NewNamespaceSettingReader initializes a reader for yaml manifests with support for setting the namespace
+func NewNamespaceSettingReader(mReader UnstructuredReader, namespace string) UnstructuredReader {
+ return &namespaceSettingReader{
+ reader: mReader,
+ namespace: namespace,
+ }
+}
+
+// namespaceSettingReader is an unstructured reader that wraps another UnstructuredReader and sets the configured namespace on every object read from it
+type namespaceSettingReader struct {
+ reader UnstructuredReader
+ namespace string
+}
+
+// Read decodes yaml data into an unstructured object
+func (n *namespaceSettingReader) Read() (*unstructured.Unstructured, error) {
+ readObj, err := n.reader.Read()
+ if err != nil {
+ return nil, err
+ }
+
+ readObj.SetNamespace(n.namespace)
+
+ return readObj, nil
+}
+
+// NewObjectReferenceReader initializes a reader from ObjectReference
+func NewObjectReferenceReader(objectReference *corev1.ObjectReference) UnstructuredReader {
+ return &objectReferenceReader{
+ objectReference: objectReference,
+ }
+}
+
+// objectReferenceReader is an unstructured reader that contains an ObjectReference
+type objectReferenceReader struct {
+ objectReference *corev1.ObjectReference
+}
+
+// Read translates ObjectReference into Unstructured object
+func (r *objectReferenceReader) Read() (*unstructured.Unstructured, error) {
+ obj := &unstructured.Unstructured{}
+ obj.SetAPIVersion(r.objectReference.APIVersion)
+ obj.SetKind(r.objectReference.Kind)
+ obj.SetNamespace(r.objectReference.Namespace)
+ obj.SetName(r.objectReference.Name)
+
+ return obj, nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartapplier.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartapplier.go
new file mode 100644
index 0000000..36a33ed
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartapplier.go
@@ -0,0 +1,124 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+
+ "github.com/gardener/gardener/pkg/chartrenderer"
+
+ "k8s.io/client-go/rest"
+)
+
+// ChartApplier is an interface that describes the methods needed to render and apply
+// Helm charts in Kubernetes clusters.
+type ChartApplier interface {
+ chartrenderer.Interface
+ Apply(ctx context.Context, chartPath, namespace, name string, opts ...ApplyOption) error
+ Delete(ctx context.Context, chartPath, namespace, name string, opts ...DeleteOption) error
+}
+
+// chartApplier is a structure that contains a chart renderer and a manifest applier.
+type chartApplier struct {
+ chartrenderer.Interface
+ Applier
+}
+
+// NewChartApplier returns a new chart applier.
+func NewChartApplier(renderer chartrenderer.Interface, applier Applier) ChartApplier {
+ return &chartApplier{renderer, applier}
+}
+
+// NewChartApplierForConfig returns a new chart applier based on the given REST config.
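+//
+// A minimal sketch, assuming an already constructed *rest.Config:
+//
+//  chartApplier, err := NewChartApplierForConfig(restConfig)
+//  if err != nil {
+//      return err
+//  }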
+func NewChartApplierForConfig(config *rest.Config) (ChartApplier, error) {
+ renderer, err := chartrenderer.NewForConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ applier, err := NewApplierForConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ return NewChartApplier(renderer, applier), nil
+}
+
+// Apply takes a path to a chart, the name of the release and the
+// release's namespace and renders the templates with the given values.
+// The resulting manifest will be applied to the cluster the Kubernetes client has been created for.
+// Additional ApplyOptions can be used to enhance the existing functionality.
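+//
+// A possible usage sketch, with hypothetical chart path, namespace, release name and values:
+//
+//  err := chartApplier.Apply(ctx, "charts/internal/my-chart", "my-namespace", "my-release",
+//      Values(map[string]interface{}{"replicaCount": 2}),
+//      ForceNamespace)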
+func (c *chartApplier) Apply(ctx context.Context, chartPath, namespace, name string, opts ...ApplyOption) error {
+ applyOpts := &ApplyOptions{}
+
+ for _, o := range opts {
+ if o != nil {
+ o.MutateApplyOptions(applyOpts)
+ }
+ }
+
+ if len(applyOpts.MergeFuncs) == 0 {
+ applyOpts.MergeFuncs = DefaultMergeFuncs
+ }
+
+ manifestReader, err := c.manifestReader(chartPath, namespace, name, applyOpts.Values)
+ if err != nil {
+ return err
+ }
+
+ if applyOpts.ForceNamespace {
+ manifestReader = NewNamespaceSettingReader(manifestReader, namespace)
+ }
+
+ return c.ApplyManifest(ctx, manifestReader, applyOpts.MergeFuncs)
+}
+
+// Delete takes a path to a chart, the name of the release and the
+// release's namespace and renders the templates.
+// The resulting manifest will be deleted from the cluster the Kubernetes client has been created for.
+func (c *chartApplier) Delete(ctx context.Context, chartPath, namespace, name string, opts ...DeleteOption) error {
+ deleteOpts := &DeleteOptions{}
+
+ for _, o := range opts {
+ if o != nil {
+ o.MutateDeleteOptions(deleteOpts)
+ }
+ }
+
+ manifestReader, err := c.manifestReader(chartPath, namespace, name, deleteOpts.Values)
+ if err != nil {
+ return err
+ }
+
+ if deleteOpts.ForceNamespace {
+ manifestReader = NewNamespaceSettingReader(manifestReader, namespace)
+ }
+
+ deleteManifestOpts := []DeleteManifestOption{}
+
+ for _, tf := range deleteOpts.TolerateErrorFuncs {
+ if tf != nil {
+ deleteManifestOpts = append(deleteManifestOpts, tf)
+ }
+ }
+
+ return c.DeleteManifest(ctx, manifestReader, deleteManifestOpts...)
+}
+
+func (c *chartApplier) manifestReader(chartPath, namespace, name string, values interface{}) (UnstructuredReader, error) {
+ release, err := c.Render(chartPath, name, namespace, values)
+ if err != nil {
+ return nil, err
+ }
+ return NewManifestReader(release.Manifest()), nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartoptions.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartoptions.go
new file mode 100644
index 0000000..438a1ca
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartoptions.go
@@ -0,0 +1,125 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// ApplyOption is some configuration that modifies options for an apply request.
+type ApplyOption interface {
+ // MutateApplyOptions applies this configuration to the given apply options.
+ MutateApplyOptions(opts *ApplyOptions)
+}
+
+// ApplyOptions contains options for apply requests
+type ApplyOptions struct {
+ // Values to pass to chart.
+ Values interface{}
+
+ // Additional MergeFunctions.
+ MergeFuncs MergeFuncs
+
+	// ForceNamespace forces the namespace for chart objects when applying the chart. This is useful because some
+	// charts do not template a Release.Namespace and leave the namespace field empty.
+ ForceNamespace bool
+}
+
+// Values applies values to ApplyOptions or DeleteOptions.
+var Values = func(values interface{}) ValueOption { return &withValue{values} }
+
+type withValue struct {
+ values interface{}
+}
+
+func (v withValue) MutateApplyOptions(opts *ApplyOptions) {
+ opts.Values = v.values
+}
+
+func (v withValue) MutateDeleteOptions(opts *DeleteOptions) {
+ opts.Values = v.values
+}
+
+// MergeFuncs can be used to modify the default merge functions for ApplyOptions:
+//
+// Apply(ctx, "chart", "my-ns", "my-release", MergeFuncs{
+// corev1.SchemeGroupVersion.WithKind("Service").GroupKind(): func(newObj, oldObj *unstructured.Unstructured) {
+// newObj.SetAnnotations(map[string]string{"foo":"bar"})
+// }
+// })
+type MergeFuncs map[schema.GroupKind]MergeFunc
+
+// MutateApplyOptions applies this configuration to the given apply options.
+func (m MergeFuncs) MutateApplyOptions(opts *ApplyOptions) {
+ opts.MergeFuncs = m
+}
+
+// ForceNamespace can be used for charts whose objects do not come with
+// a Release.Namespace option and leave the namespace field empty.
+var ForceNamespace = forceNamespace{}
+
+type forceNamespace struct{}
+
+func (forceNamespace) MutateApplyOptions(opts *ApplyOptions) {
+ opts.ForceNamespace = true
+}
+
+func (forceNamespace) MutateDeleteOptions(opts *DeleteOptions) {
+ opts.ForceNamespace = true
+}
+
+// ValueOption contains value options for Apply and Delete.
+type ValueOption interface {
+ ApplyOption
+ DeleteOption
+}
+
+// DeleteOption is some configuration that modifies options for a delete request.
+type DeleteOption interface {
+ // MutateDeleteOptions applies this configuration to the given delete options.
+ MutateDeleteOptions(opts *DeleteOptions)
+}
+
+// DeleteOptions contains options for delete requests
+type DeleteOptions struct {
+ // Values to pass to chart.
+ Values interface{}
+
+	// ForceNamespace forces the namespace for chart objects when deleting the chart. This is useful because some
+	// charts do not template a Release.Namespace and leave the namespace field empty.
+ ForceNamespace bool
+
+ // TolerateErrorFuncs are functions for which errors are tolerated.
+ TolerateErrorFuncs []TolerateErrorFunc
+}
+
+// TolerateErrorFunc is a function for which err is tolerated.
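+//
+// A possible usage sketch, tolerating "not found" errors during chart deletion (apierrors refers to
+// k8s.io/apimachinery/pkg/api/errors):
+//
+//  err := chartApplier.Delete(ctx, chartPath, namespace, name, TolerateErrorFunc(apierrors.IsNotFound))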
+type TolerateErrorFunc func(err error) bool
+
+// MutateDeleteOptions appends this TolerateErrorFunc to the given DeleteOptions.
+func (t TolerateErrorFunc) MutateDeleteOptions(opts *DeleteOptions) {
+ if opts.TolerateErrorFuncs == nil {
+ opts.TolerateErrorFuncs = []TolerateErrorFunc{}
+ }
+
+ opts.TolerateErrorFuncs = append(opts.TolerateErrorFuncs, t)
+}
+
+// MutateDeleteManifestOptions appends this TolerateErrorFunc to the given DeleteManifestOptions.
+func (t TolerateErrorFunc) MutateDeleteManifestOptions(opts *DeleteManifestOptions) {
+ if opts.TolerateErrorFuncs == nil {
+ opts.TolerateErrorFuncs = []TolerateErrorFunc{}
+ }
+
+ opts.TolerateErrorFuncs = append(opts.TolerateErrorFuncs, t)
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/client.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/client.go
new file mode 100644
index 0000000..55e60fb
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/client.go
@@ -0,0 +1,319 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ gardencoreclientset "github.com/gardener/gardener/pkg/client/core/clientset/versioned"
+ versionutils "github.com/gardener/gardener/pkg/utils/version"
+
+ corev1 "k8s.io/api/core/v1"
+ apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+ kubernetesclientset "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+ componentbaseconfig "k8s.io/component-base/config"
+ apiserviceclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
+ "sigs.k8s.io/controller-runtime/pkg/cache"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var (
+ // UseCachedRuntimeClients is a flag for enabling cached controller-runtime clients (defaults to false).
+ // If enabled, the client returned by Interface.Client() will be backed by a cache, otherwise it will be the same
+ // client that will be returned by Interface.DirectClient().
+ UseCachedRuntimeClients = false
+)
+
+// KubeConfig is the key to the kubeconfig
+const KubeConfig = "kubeconfig"
+
+// NewClientFromFile creates a new Client struct for a given kubeconfig. The kubeconfig will be
+// read from the filesystem at the given kubeconfigPath. If given, masterURL overrides the
+// master URL in the kubeconfig.
+// If no filepath is given, the in-cluster configuration will be taken into account.
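+//
+// A minimal sketch, assuming a kubeconfig at a hypothetical path and no master URL override:
+//
+//  k8sClient, err := NewClientFromFile("", "/etc/kubernetes/kubeconfig")
+//  if err != nil {
+//      return err
+//  }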
+func NewClientFromFile(masterURL, kubeconfigPath string, fns ...ConfigFunc) (Interface, error) {
+ if kubeconfigPath == "" && masterURL == "" {
+ kubeconfig, err := rest.InClusterConfig()
+ if err != nil {
+ return nil, err
+ }
+ opts := append([]ConfigFunc{WithRESTConfig(kubeconfig)}, fns...)
+ return NewWithConfig(opts...)
+ }
+
+ clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+ &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
+ &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterURL}},
+ )
+
+ if err := validateClientConfig(clientConfig); err != nil {
+ return nil, err
+ }
+
+ config, err := clientConfig.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ opts := append([]ConfigFunc{WithRESTConfig(config)}, fns...)
+ return NewWithConfig(opts...)
+}
+
+// NewClientFromBytes creates a new Client struct for a given kubeconfig byte slice.
+func NewClientFromBytes(kubeconfig []byte, fns ...ConfigFunc) (Interface, error) {
+ config, err := RESTConfigFromClientConnectionConfiguration(nil, kubeconfig)
+ if err != nil {
+ return nil, err
+ }
+
+ opts := append([]ConfigFunc{WithRESTConfig(config)}, fns...)
+ return NewWithConfig(opts...)
+}
+
+// NewClientFromSecret creates a new Client struct for a given kubeconfig stored as a
+// Secret in an existing Kubernetes cluster. This cluster will be accessed with the given client. It will
+// read the Secret with the given name in the given namespace. The Secret must contain a field "kubeconfig" which will
+// be used.
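+//
+// A possible usage sketch, with a hypothetical namespace and secret name (c is assumed to be an existing
+// controller-runtime client for the hosting cluster):
+//
+//  targetClient, err := NewClientFromSecret(ctx, c, "shoot--foo--bar", "kubecfg")
+//  if err != nil {
+//      return err
+//  }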
+func NewClientFromSecret(ctx context.Context, c client.Client, namespace, secretName string, fns ...ConfigFunc) (Interface, error) {
+ secret := &corev1.Secret{}
+ if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: secretName}, secret); err != nil {
+ return nil, err
+ }
+ return NewClientFromSecretObject(secret, fns...)
+}
+
+// NewClientFromSecretObject creates a new Client struct for a given Kubernetes Secret object. The Secret must
+// contain a field "kubeconfig" which will be used.
+func NewClientFromSecretObject(secret *corev1.Secret, fns ...ConfigFunc) (Interface, error) {
+ if kubeconfig, ok := secret.Data[KubeConfig]; ok {
+ if len(kubeconfig) == 0 {
+ return nil, errors.New("the secret's field 'kubeconfig' is empty")
+ }
+
+ return NewClientFromBytes(kubeconfig, fns...)
+ }
+ return nil, errors.New("the secret does not contain a field with name 'kubeconfig'")
+}
+
+// RESTConfigFromClientConnectionConfiguration creates a *rest.Config from a componentbaseconfig.ClientConnectionConfiguration & the configured kubeconfig
+func RESTConfigFromClientConnectionConfiguration(cfg *componentbaseconfig.ClientConnectionConfiguration, kubeconfig []byte) (*rest.Config, error) {
+ var (
+ restConfig *rest.Config
+ err error
+ )
+
+ if kubeconfig == nil {
+ clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+ &clientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Kubeconfig},
+ &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: ""}},
+ )
+
+ if err := validateClientConfig(clientConfig); err != nil {
+ return nil, err
+ }
+
+ restConfig, err = clientConfig.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ restConfig, err = RESTConfigFromKubeconfig(kubeconfig)
+ if err != nil {
+ return restConfig, err
+ }
+ }
+
+ if cfg != nil {
+ restConfig.Burst = int(cfg.Burst)
+ restConfig.QPS = cfg.QPS
+ restConfig.AcceptContentTypes = cfg.AcceptContentTypes
+ restConfig.ContentType = cfg.ContentType
+ }
+
+ return restConfig, nil
+}
+
+// RESTConfigFromKubeconfig returns a rest.Config from the bytes of a kubeconfig
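+//
+// A minimal sketch (kubeconfigBytes is assumed to hold a serialized kubeconfig):
+//
+//  restConfig, err := RESTConfigFromKubeconfig(kubeconfigBytes)
+//  if err != nil {
+//      return err
+//  }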
+func RESTConfigFromKubeconfig(kubeconfig []byte) (*rest.Config, error) {
+ clientConfig, err := clientcmd.NewClientConfigFromBytes(kubeconfig)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := validateClientConfig(clientConfig); err != nil {
+ return nil, err
+ }
+
+ restConfig, err := clientConfig.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ return restConfig, nil
+}
+
+func validateClientConfig(clientConfig clientcmd.ClientConfig) error {
+ rawConfig, err := clientConfig.RawConfig()
+ if err != nil {
+ return err
+ }
+ return ValidateConfig(rawConfig)
+}
+
+// ValidateConfig validates that the auth info of a given kubeconfig doesn't have unsupported fields.
+func ValidateConfig(config clientcmdapi.Config) error {
+ validFields := []string{"client-certificate-data", "client-key-data", "token", "username", "password"}
+
+ for user, authInfo := range config.AuthInfos {
+ switch {
+ case authInfo.ClientCertificate != "":
+ return fmt.Errorf("client certificate files are not supported (user %q), these are the valid fields: %+v", user, validFields)
+ case authInfo.ClientKey != "":
+ return fmt.Errorf("client key files are not supported (user %q), these are the valid fields: %+v", user, validFields)
+ case authInfo.TokenFile != "":
+ return fmt.Errorf("token files are not supported (user %q), these are the valid fields: %+v", user, validFields)
+ case authInfo.Impersonate != "" || len(authInfo.ImpersonateGroups) > 0:
+ return fmt.Errorf("impersonation is not supported, these are the valid fields: %+v", validFields)
+ case authInfo.AuthProvider != nil && len(authInfo.AuthProvider.Config) > 0:
+ return fmt.Errorf("auth provider configurations are not supported (user %q), these are the valid fields: %+v", user, validFields)
+ case authInfo.Exec != nil:
+ return fmt.Errorf("exec configurations are not supported (user %q), these are the valid fields: %+v", user, validFields)
+ }
+ }
+
+ return nil
+}
+
+var supportedKubernetesVersions = []string{
+ "1.10",
+ "1.11",
+ "1.12",
+ "1.13",
+ "1.14",
+ "1.15",
+ "1.16",
+ "1.17",
+ "1.18",
+ "1.19",
+ "1.20",
+}
+
+func checkIfSupportedKubernetesVersion(gitVersion string) error {
+ for _, supportedVersion := range supportedKubernetesVersions {
+ ok, err := versionutils.CompareVersions(gitVersion, "~", supportedVersion)
+ if err != nil {
+ return err
+ }
+
+ if ok {
+ return nil
+ }
+ }
+ return fmt.Errorf("unsupported kubernetes version %q", gitVersion)
+}
+
+// NewWithConfig returns a new Kubernetes base client.
+func NewWithConfig(fns ...ConfigFunc) (Interface, error) {
+ conf := &Config{}
+
+ for _, f := range fns {
+ if err := f(conf); err != nil {
+ return nil, err
+ }
+ }
+
+ return newClientSet(conf)
+}
+
+func newClientSet(conf *Config) (Interface, error) {
+ if err := setConfigDefaults(conf); err != nil {
+ return nil, err
+ }
+
+ runtimeCache, err := NewRuntimeCache(conf.restConfig, cache.Options{
+ Scheme: conf.clientOptions.Scheme,
+ Mapper: conf.clientOptions.Mapper,
+ Resync: conf.cacheResync,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ directClient, err := client.New(conf.restConfig, conf.clientOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ var runtimeClient client.Client
+ if UseCachedRuntimeClients && !conf.disableCache {
+ runtimeClient, err = newRuntimeClientWithCache(conf.restConfig, conf.clientOptions, runtimeCache, conf.uncachedObjects...)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ runtimeClient = directClient
+ }
+
+ kubernetes, err := kubernetesclientset.NewForConfig(conf.restConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ gardenCore, err := gardencoreclientset.NewForConfig(conf.restConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ apiRegistration, err := apiserviceclientset.NewForConfig(conf.restConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ apiExtension, err := apiextensionsclientset.NewForConfig(conf.restConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ cs := &clientSet{
+ config: conf.restConfig,
+ restMapper: conf.clientOptions.Mapper,
+ restClient: kubernetes.Discovery().RESTClient(),
+
+ applier: NewApplier(runtimeClient, conf.clientOptions.Mapper),
+
+ client: runtimeClient,
+ directClient: directClient,
+ cache: runtimeCache,
+
+ kubernetes: kubernetes,
+ gardenCore: gardenCore,
+ apiregistration: apiRegistration,
+ apiextension: apiExtension,
+ }
+
+ if _, err := cs.DiscoverVersion(); err != nil {
+ return nil, fmt.Errorf("error discovering kubernetes version: %w", err)
+ }
+
+ return cs, nil
+}
+
+func setConfigDefaults(conf *Config) error {
+ return setClientOptionsDefaults(conf.restConfig, &conf.clientOptions)
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/clientset.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/clientset.go
new file mode 100644
index 0000000..7ab0857
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/clientset.go
@@ -0,0 +1,176 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+ "sync"
+
+ "github.com/gardener/gardener/pkg/chartrenderer"
+ gardencoreclientset "github.com/gardener/gardener/pkg/client/core/clientset/versioned"
+ "github.com/gardener/gardener/pkg/logger"
+
+ apiextensionclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/version"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ apiregistrationclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
+ "sigs.k8s.io/controller-runtime/pkg/cache"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// clientSet is a struct containing the configuration for the respective Kubernetes
+// cluster, the collection of Kubernetes clients containing all REST clients
+// for the built-in Kubernetes API groups, and the Garden clientset, which is a REST clientSet
+// for the Garden API group.
+// The RESTClient itself is a normal HTTP client for the respective Kubernetes cluster,
+// allowing requests to arbitrary URLs.
+// The version string contains only the major/minor part in the form "major.minor".
+type clientSet struct {
+ config *rest.Config
+ restMapper meta.RESTMapper
+ restClient rest.Interface
+
+ applier Applier
+ chartApplier ChartApplier
+ chartRenderer chartrenderer.Interface
+
+ // client is the default controller-runtime client which uses SharedIndexInformers to keep its cache in sync
+ client client.Client
+ // directClient is a client which can be used to make requests directly to the API server instead of reading from
+ // the client's cache
+ directClient client.Client
+ // cache is the client's cache
+ cache cache.Cache
+
+ // startOnce guards starting the cache only once
+ startOnce sync.Once
+
+ kubernetes kubernetes.Interface
+ gardenCore gardencoreclientset.Interface
+ apiextension apiextensionclientset.Interface
+ apiregistration apiregistrationclientset.Interface
+
+ version string
+}
+
+// Applier returns the Applier of this ClientSet.
+func (c *clientSet) Applier() Applier {
+ return c.applier
+}
+
+// ChartRenderer returns a ChartRenderer populated with the cluster's Capabilities.
+func (c *clientSet) ChartRenderer() chartrenderer.Interface {
+ return c.chartRenderer
+}
+
+// ChartApplier returns a ChartApplier using the ClientSet's ChartRenderer and Applier.
+func (c *clientSet) ChartApplier() ChartApplier {
+ return c.chartApplier
+}
+
+// RESTConfig will return the config attribute of the Client object.
+func (c *clientSet) RESTConfig() *rest.Config {
+ return c.config
+}
+
+// Client returns the controller-runtime client of this ClientSet.
+func (c *clientSet) Client() client.Client {
+ return c.client
+}
+
+// DirectClient returns a controller-runtime client, which can be used to talk to the API server directly
+// (without using a cache).
+func (c *clientSet) DirectClient() client.Client {
+ return c.directClient
+}
+
+// Cache returns the ClientSet's controller-runtime cache. It can be used to get Informers for arbitrary objects.
+func (c *clientSet) Cache() cache.Cache {
+ return c.cache
+}
+
+// RESTMapper returns the restMapper of this ClientSet.
+func (c *clientSet) RESTMapper() meta.RESTMapper {
+ return c.restMapper
+}
+
+// Kubernetes will return the kubernetes attribute of the Client object.
+func (c *clientSet) Kubernetes() kubernetes.Interface {
+ return c.kubernetes
+}
+
+// GardenCore will return the gardenCore attribute of the Client object.
+func (c *clientSet) GardenCore() gardencoreclientset.Interface {
+ return c.gardenCore
+}
+
+// APIExtension will return the apiextensions attribute of the Client object.
+func (c *clientSet) APIExtension() apiextensionclientset.Interface {
+ return c.apiextension
+}
+
+// APIRegistration will return the apiregistration attribute of the Client object.
+func (c *clientSet) APIRegistration() apiregistrationclientset.Interface {
+ return c.apiregistration
+}
+
+// RESTClient will return the restClient attribute of the Client object.
+func (c *clientSet) RESTClient() rest.Interface {
+ return c.restClient
+}
+
+// Version returns the GitVersion of the Kubernetes client stored on the object.
+func (c *clientSet) Version() string {
+ return c.version
+}
+
+// DiscoverVersion tries to retrieve the server version of the targeted Kubernetes cluster and updates the
+// ClientSet's saved version accordingly. Use Version if you only want to retrieve the kubernetes version instead
+// of refreshing the ClientSet's saved version.
+func (c *clientSet) DiscoverVersion() (*version.Info, error) {
+ serverVersion, err := c.kubernetes.Discovery().ServerVersion()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := checkIfSupportedKubernetesVersion(serverVersion.GitVersion); err != nil {
+ return nil, err
+ }
+
+ c.version = serverVersion.GitVersion
+ c.chartRenderer = chartrenderer.NewWithServerVersion(serverVersion)
+ c.chartApplier = NewChartApplier(c.chartRenderer, c.applier)
+
+ return serverVersion, nil
+}
+
+// Start starts the cache of the ClientSet's controller-runtime client and returns immediately.
+// It must be called first before using the client to retrieve objects from the API server.
+func (c *clientSet) Start(ctx context.Context) {
+ c.startOnce.Do(func() {
+ go func() {
+ if err := c.cache.Start(ctx); err != nil {
+ logger.Logger.Errorf("cache.Start returned error, which should never happen, ignoring.")
+ }
+ }()
+ })
+}
+
+// WaitForCacheSync waits for the cache of the ClientSet's controller-runtime client to be synced.
+func (c *clientSet) WaitForCacheSync(ctx context.Context) bool {
+ return c.cache.WaitForCacheSync(ctx)
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/deployments.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/deployments.go
new file mode 100644
index 0000000..fef83f6
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/deployments.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/gardener/gardener/pkg/utils/retry"
+
+ appsv1 "k8s.io/api/apps/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// HasDeploymentRolloutCompleted checks whether the number of updated and
+// available replicas equals the deployment's desired replica count, thereby
+// confirming a successful rollout of the deployment.
+func HasDeploymentRolloutCompleted(ctx context.Context, c client.Client, namespace, name string) (bool, error) {
+ var (
+ deployment = &appsv1.Deployment{}
+ desiredReplicas = int32(0)
+ )
+
+ if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, deployment); err != nil {
+ return retry.SevereError(err)
+ }
+
+ if deployment.Spec.Replicas != nil {
+ desiredReplicas = *deployment.Spec.Replicas
+ }
+
+ if deployment.Generation != deployment.Status.ObservedGeneration {
+ return retry.MinorError(fmt.Errorf("%q not observed at latest generation (%d/%d)", name,
+ deployment.Status.ObservedGeneration, deployment.Generation))
+ }
+
+ if deployment.Status.Replicas == desiredReplicas && deployment.Status.UpdatedReplicas == desiredReplicas && deployment.Status.AvailableReplicas == desiredReplicas {
+ return retry.Ok()
+ }
+
+ return retry.MinorError(fmt.Errorf("deployment %q currently has Updated/Available: %d/%d replicas. Desired: %d", name, deployment.Status.UpdatedReplicas, deployment.Status.AvailableReplicas, desiredReplicas))
+}
+
+// WaitUntilDeploymentRolloutIsComplete waits for the number of updated &
+// available replicas to be equal to the deployment's desired replicas count.
+// It keeps retrying until the timeout is reached.
+func WaitUntilDeploymentRolloutIsComplete(ctx context.Context, client client.Client, namespace string, name string, interval, timeout time.Duration) error {
+ return retry.UntilTimeout(ctx, interval, timeout, func(ctx context.Context) (done bool, err error) {
+ return HasDeploymentRolloutCompleted(ctx, client, namespace, name)
+ })
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/manifestoptions.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/manifestoptions.go
new file mode 100644
index 0000000..0dc9aff
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/manifestoptions.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+// DeleteManifestOption is some configuration that modifies options for a delete request.
+type DeleteManifestOption interface {
+ // MutateDeleteManifestOptions applies this configuration to the given delete manifest options.
+ MutateDeleteManifestOptions(opts *DeleteManifestOptions)
+}
+
+// DeleteManifestOptions contains options for delete requests.
+type DeleteManifestOptions struct {
+ // TolerateErrorFuncs are functions for which errors are tolerated.
+ TolerateErrorFuncs []TolerateErrorFunc
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/options.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/options.go
new file mode 100644
index 0000000..842aacb
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/options.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "errors"
+ "time"
+
+ "k8s.io/client-go/rest"
+ baseconfig "k8s.io/component-base/config"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Config carries options for new ClientSets.
+type Config struct {
+ clientOptions client.Options
+ restConfig *rest.Config
+ cacheResync *time.Duration
+ disableCache bool
+ uncachedObjects []client.Object
+}
+
+// NewConfig returns a new Config with an empty REST config to allow testing ConfigFuncs without exporting
+// the fields of the Config type.
+func NewConfig() *Config {
+ return &Config{restConfig: &rest.Config{}}
+}
+
+// ConfigFunc is a function that mutates a Config struct.
+// It implements the functional options pattern. See
+// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md.
+type ConfigFunc func(config *Config) error
+
+// WithRESTConfig returns a ConfigFunc that sets the passed rest.Config on the Config object.
+func WithRESTConfig(restConfig *rest.Config) ConfigFunc {
+ return func(config *Config) error {
+ config.restConfig = restConfig
+ return nil
+ }
+}
+
+// WithClientConnectionOptions returns a ConfigFunc that transfers settings from
+// the passed ClientConnectionConfiguration.
+// The kubeconfig location in ClientConnectionConfiguration is disregarded, though!
+func WithClientConnectionOptions(cfg baseconfig.ClientConnectionConfiguration) ConfigFunc {
+ return func(config *Config) error {
+ if config.restConfig == nil {
+ return errors.New("REST config must be set before setting connection options")
+ }
+ config.restConfig.Burst = int(cfg.Burst)
+ config.restConfig.QPS = cfg.QPS
+ config.restConfig.AcceptContentTypes = cfg.AcceptContentTypes
+ config.restConfig.ContentType = cfg.ContentType
+ return nil
+ }
+}
+
+// WithClientOptions returns a ConfigFunc that sets the passed Options on the Config object.
+func WithClientOptions(opt client.Options) ConfigFunc {
+ return func(config *Config) error {
+ config.clientOptions = opt
+ return nil
+ }
+}
+
+// WithCacheResyncPeriod returns a ConfigFunc that sets the client's cache resync period to the given duration.
+func WithCacheResyncPeriod(resync time.Duration) ConfigFunc {
+ return func(config *Config) error {
+ config.cacheResync = &resync
+ return nil
+ }
+}
+
+// WithDisabledCachedClient disables the cache in the controller-runtime client, so Client() will be equivalent to
+// DirectClient().
+func WithDisabledCachedClient() ConfigFunc {
+ return func(config *Config) error {
+ config.disableCache = true
+ return nil
+ }
+}
+
+// WithUncached disables the cached client for the specified objects' GroupKinds.
+func WithUncached(objs ...client.Object) ConfigFunc {
+ return func(config *Config) error {
+ config.uncachedObjects = append(config.uncachedObjects, objs...)
+ return nil
+ }
+}
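
The ConfigFunc type above implements the functional options pattern; a short sketch of how such options compose (illustrative only; the 30-minute resync value is an arbitrary example, not a recommendation from the upstream code):

```go
package example

import (
	"time"

	"github.com/gardener/gardener/pkg/client/kubernetes"
	"k8s.io/client-go/rest"
)

// buildConfig shows how ConfigFuncs compose: each one mutates the same Config.
func buildConfig(restCfg *rest.Config) (*kubernetes.Config, error) {
	cfg := kubernetes.NewConfig()
	for _, f := range []kubernetes.ConfigFunc{
		kubernetes.WithRESTConfig(restCfg),
		kubernetes.WithCacheResyncPeriod(30 * time.Minute), // arbitrary example value
		kubernetes.WithDisabledCachedClient(),
	} {
		if err := f(cfg); err != nil {
			return nil, err
		}
	}
	return cfg, nil
}
```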
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/pods.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/pods.go
new file mode 100644
index 0000000..f329bf7
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/pods.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/gardener/gardener/pkg/utils"
+
+ corev1 "k8s.io/api/core/v1"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/portforward"
+ "k8s.io/client-go/tools/remotecommand"
+ "k8s.io/client-go/transport/spdy"
+)
+
+// NewPodExecutor returns a podExecutor
+func NewPodExecutor(config *rest.Config) PodExecutor {
+ return &podExecutor{
+ config: config,
+ }
+}
+
+// PodExecutor is the pod executor interface
+type PodExecutor interface {
+ Execute(namespace, name, containerName, command, commandArg string) (io.Reader, error)
+}
+
+type podExecutor struct {
+ config *rest.Config
+}
+
+// Execute executes a command on a pod
+func (p *podExecutor) Execute(namespace, name, containerName, command, commandArg string) (io.Reader, error) {
+ client, err := corev1client.NewForConfig(p.config)
+ if err != nil {
+ return nil, err
+ }
+
+ var stdout, stderr bytes.Buffer
+ request := client.RESTClient().
+ Post().
+ Resource("pods").
+ Name(name).
+ Namespace(namespace).
+ SubResource("exec").
+ Param("container", containerName).
+ Param("command", command).
+ Param("stdin", "true").
+ Param("stdout", "true").
+ Param("stderr", "true").
+ Param("tty", "false")
+
+ executor, err := remotecommand.NewSPDYExecutor(p.config, http.MethodPost, request.URL())
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize the command executor: %v", err)
+ }
+
+ err = executor.Stream(remotecommand.StreamOptions{
+ Stdin: strings.NewReader(commandArg),
+ Stdout: &stdout,
+ Stderr: &stderr,
+ Tty: false,
+ })
+ if err != nil {
+ return &stderr, err
+ }
+
+ return &stdout, nil
+}
+
+// GetPodLogs retrieves the pod logs of the pod of the given name with the given options.
+func GetPodLogs(ctx context.Context, podInterface corev1client.PodInterface, name string, options *corev1.PodLogOptions) ([]byte, error) {
+ request := podInterface.GetLogs(name, options)
+
+ stream, err := request.Stream(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { utilruntime.HandleError(stream.Close()) }()
+
+ return ioutil.ReadAll(stream)
+}
+
+// ForwardPodPort tries to forward the given remote port of the pod with the given name in the given namespace
+// to the given local port. If the local port equals zero, a free port will be chosen randomly.
+// It returns the stop channel which must be closed when the port forward connection should be terminated.
+func (c *clientSet) ForwardPodPort(namespace, name string, local, remote int) (chan struct{}, error) {
+ fw, stopChan, err := c.setupForwardPodPort(namespace, name, local, remote)
+ if err != nil {
+ return nil, err
+ }
+ return stopChan, fw.ForwardPorts()
+}
+
+// CheckForwardPodPort tries to forward the given remote port of the pod with the given name in the given namespace
+// to the given local port. If the local port equals zero, a free port will be chosen randomly.
+// It returns nil if the port forward connection has been established successfully, or an error otherwise.
+func (c *clientSet) CheckForwardPodPort(namespace, name string, local, remote int) error {
+ fw, stopChan, err := c.setupForwardPodPort(namespace, name, local, remote)
+ if err != nil {
+ return fmt.Errorf("could not setup pod port forwarding: %v", err)
+ }
+
+ errChan := make(chan error)
+ go func() {
+ errChan <- fw.ForwardPorts()
+ }()
+ defer close(stopChan)
+
+ select {
+ case err = <-errChan:
+ return fmt.Errorf("error forwarding ports: %v", err)
+ case <-fw.Ready:
+ return nil
+ case <-time.After(time.Second * 5):
+ return errors.New("port forward connection could not be established within five seconds")
+ }
+}
+
+func (c *clientSet) setupForwardPodPort(namespace, name string, local, remote int) (*portforward.PortForwarder, chan struct{}, error) {
+ var (
+ stopChan = make(chan struct{}, 1)
+ readyChan = make(chan struct{}, 1)
+ out = ioutil.Discard
+ localPort int
+ )
+
+ u := c.kubernetes.CoreV1().RESTClient().Post().Resource("pods").Namespace(namespace).Name(name).SubResource("portforward").URL()
+
+ transport, upgrader, err := spdy.RoundTripperFor(c.config)
+ if err != nil {
+ return nil, nil, err
+ }
+ dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", u)
+
+ if local == 0 {
+ localPort, err = utils.FindFreePort()
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ fw, err := portforward.New(dialer, []string{fmt.Sprintf("%d:%d", localPort, remote)}, stopChan, readyChan, out, out)
+ if err != nil {
+ return nil, nil, err
+ }
+ return fw, stopChan, nil
+}
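
A hedged sketch of how the pod helpers in this file are typically driven (pod, container, and command names are placeholders and not taken from the upstream code):

```go
package example

import (
	"context"
	"io/ioutil"

	"github.com/gardener/gardener/pkg/client/kubernetes"
	corev1 "k8s.io/api/core/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
)

// execAndFetchLogs runs a command in a pod container via the PodExecutor and
// then reads the last lines of the pod's logs.
func execAndFetchLogs(ctx context.Context, restCfg *rest.Config, pods corev1client.PodInterface) ([]byte, []byte, error) {
	out, err := kubernetes.NewPodExecutor(restCfg).Execute("default", "example-pod", "main", "/bin/sh", "echo hello")
	if err != nil {
		return nil, nil, err
	}
	stdout, err := ioutil.ReadAll(out)
	if err != nil {
		return nil, nil, err
	}

	tail := int64(10)
	logs, err := kubernetes.GetPodLogs(ctx, pods, "example-pod", &corev1.PodLogOptions{TailLines: &tail})
	if err != nil {
		return nil, nil, err
	}
	return stdout, logs, nil
}
```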
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/runtime_client.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/runtime_client.go
new file mode 100644
index 0000000..e83e41c
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/runtime_client.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+
+ "github.com/gardener/gardener/pkg/logger"
+ "github.com/gardener/gardener/pkg/mock/go/context"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/time/rate"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+ "sigs.k8s.io/controller-runtime/pkg/cache"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+const (
+ defaultCacheResyncPeriod = 6 * time.Hour
+)
+
+// NewDirectClient creates a new client.Client which can be used to talk to the API directly (without a cache).
+func NewDirectClient(config *rest.Config, options client.Options) (client.Client, error) {
+ if err := setClientOptionsDefaults(config, &options); err != nil {
+ return nil, err
+ }
+
+ return client.New(config, options)
+}
+
+// NewRuntimeClientWithCache creates a new client.Client with the given config and options.
+// The client uses a new cache, which will be started immediately using the given context.
+func NewRuntimeClientWithCache(ctx context.Context, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) {
+ if err := setClientOptionsDefaults(config, &options); err != nil {
+ return nil, err
+ }
+
+ clientCache, err := NewRuntimeCache(config, cache.Options{
+ Scheme: options.Scheme,
+ Mapper: options.Mapper,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not create new client cache: %w", err)
+ }
+
+ runtimeClient, err := newRuntimeClientWithCache(config, options, clientCache, uncachedObjects...)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ if err := clientCache.Start(ctx); err != nil {
+ logger.NewLogger(fmt.Sprint(logrus.ErrorLevel)).Errorf("cache.Start returned error, which should never happen, ignoring.")
+ }
+ }()
+
+ clientCache.WaitForCacheSync(ctx)
+
+ return runtimeClient, nil
+}
+
+func newRuntimeClientWithCache(config *rest.Config, options client.Options, cache cache.Cache, uncachedObjects ...client.Object) (client.Client, error) {
+ return manager.NewClientBuilder().WithUncached(uncachedObjects...).Build(cache, config, options)
+}
+
+func setClientOptionsDefaults(config *rest.Config, options *client.Options) error {
+ if options.Mapper == nil {
+ // default the client's REST mapper to a dynamic REST mapper (automatically rediscovers resources on NoMatchErrors)
+ mapper, err := apiutil.NewDynamicRESTMapper(
+ config,
+ apiutil.WithLazyDiscovery,
+ apiutil.WithLimiter(rate.NewLimiter(rate.Every(5*time.Second), 1)),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to create new DynamicRESTMapper: %w", err)
+ }
+ options.Mapper = mapper
+ }
+
+ return nil
+}
+
+// NewRuntimeCache creates a new cache.Cache with the given config and options. It can be used
+// for creating new controller-runtime clients with caches.
+func NewRuntimeCache(config *rest.Config, options cache.Options) (cache.Cache, error) {
+ if err := setCacheOptionsDefaults(&options); err != nil {
+ return nil, err
+ }
+
+ return cache.New(config, options)
+}
+
+func setCacheOptionsDefaults(options *cache.Options) error {
+ if options.Resync == nil {
+ resync := defaultCacheResyncPeriod
+ options.Resync = &resync
+ }
+
+ return nil
+}
+
+// NewDirectClientFromSecret creates a new controller runtime Client struct for a given secret.
+func NewDirectClientFromSecret(secret *corev1.Secret, fns ...ConfigFunc) (client.Client, error) {
+ if kubeconfig, ok := secret.Data[KubeConfig]; ok {
+ return NewDirectClientFromBytes(kubeconfig, fns...)
+ }
+ return nil, errors.New("no valid kubeconfig found")
+}
+
+// NewDirectClientFromBytes creates a new controller runtime Client struct for a given kubeconfig byte slice.
+func NewDirectClientFromBytes(kubeconfig []byte, fns ...ConfigFunc) (client.Client, error) {
+ clientConfig, err := clientcmd.NewClientConfigFromBytes(kubeconfig)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := validateClientConfig(clientConfig); err != nil {
+ return nil, err
+ }
+
+ config, err := clientConfig.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ opts := append([]ConfigFunc{WithRESTConfig(config)}, fns...)
+ return NewDirectClientWithConfig(opts...)
+}
+
+// NewDirectClientWithConfig returns a new controller runtime client from a config.
+func NewDirectClientWithConfig(fns ...ConfigFunc) (client.Client, error) {
+ conf := &Config{}
+ for _, f := range fns {
+ if err := f(conf); err != nil {
+ return nil, err
+ }
+ }
+ return NewDirectClient(conf.restConfig, conf.clientOptions)
+}
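
A short sketch of the secret-based constructor in use (illustrative; the secret is expected to carry the kubeconfig under the package's KubeConfig data key, as the code above shows, and the namespace/name arguments are placeholders):

```go
package example

import (
	"context"

	"github.com/gardener/gardener/pkg/client/kubernetes"
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// clientFromKubeconfigSecret builds an uncached client from a kubeconfig that
// is stored in a secret (under the package's KubeConfig data key).
func clientFromKubeconfigSecret(ctx context.Context, reader client.Client, namespace, name string) (client.Client, error) {
	secret := &corev1.Secret{}
	if err := reader.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, secret); err != nil {
		return nil, err
	}
	return kubernetes.NewDirectClientFromSecret(secret, kubernetes.WithClientOptions(client.Options{}))
}
```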
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/scaling.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/scaling.go
new file mode 100644
index 0000000..fa13c90
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/scaling.go
@@ -0,0 +1,72 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+ "fmt"
+
+ druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"
+
+ appsv1 "k8s.io/api/apps/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ScaleStatefulSet scales a StatefulSet.
+func ScaleStatefulSet(ctx context.Context, c client.Client, key client.ObjectKey, replicas int32) error {
+ statefulset := &appsv1.StatefulSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: key.Name,
+ Namespace: key.Namespace,
+ },
+ }
+
+ return scaleResource(ctx, c, statefulset, replicas)
+}
+
+// ScaleEtcd scales an Etcd resource.
+func ScaleEtcd(ctx context.Context, c client.Client, key client.ObjectKey, replicas int) error {
+ etcd := &druidv1alpha1.Etcd{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: key.Name,
+ Namespace: key.Namespace,
+ },
+ }
+
+ return scaleResource(ctx, c, etcd, int32(replicas))
+}
+
+// ScaleDeployment scales a Deployment.
+func ScaleDeployment(ctx context.Context, c client.Client, key client.ObjectKey, replicas int32) error {
+ deployment := &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: key.Name,
+ Namespace: key.Namespace,
+ },
+ }
+
+ return scaleResource(ctx, c, deployment, replicas)
+}
+
+// scaleResource scales the resource's 'spec.replicas' to the given replica count.
+func scaleResource(ctx context.Context, c client.Client, obj client.Object, replicas int32) error {
+ patch := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, replicas))
+
+ // TODO: replace this with call to scale subresource once controller-runtime supports it
+ // see: https://github.com/kubernetes-sigs/controller-runtime/issues/172
+ return c.Patch(ctx, obj, client.RawPatch(types.MergePatchType, patch))
+}
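
A minimal usage sketch for the scaling helpers above (illustrative only; the zero/one replica counts are arbitrary):

```go
package example

import (
	"context"

	"github.com/gardener/gardener/pkg/client/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// scaleDownAndUp scales a deployment to zero replicas and back to one using
// the merge-patch based helper above.
func scaleDownAndUp(ctx context.Context, c client.Client, key client.ObjectKey) error {
	if err := kubernetes.ScaleDeployment(ctx, c, key, 0); err != nil {
		return err
	}
	return kubernetes.ScaleDeployment(ctx, c, key, 1)
}
```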
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/types.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/types.go
new file mode 100644
index 0000000..7da1186
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/types.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+
+ "github.com/gardener/gardener/pkg/chartrenderer"
+ gardencoreclientset "github.com/gardener/gardener/pkg/client/core/clientset/versioned"
+ gardencorescheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+ gardenextensionsscheme "github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme"
+
+ druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"
+ dnsv1alpha1 "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1"
+ resourcesscheme "github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1"
+ hvpav1alpha1 "github.com/gardener/hvpa-controller/api/v1alpha1"
+ apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+ apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/runtime/serializer/json"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/version"
+ autoscalingscheme "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2"
+ kubernetesclientset "k8s.io/client-go/kubernetes"
+ corescheme "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+ apiregistrationclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
+ apiregistrationscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+ "sigs.k8s.io/controller-runtime/pkg/cache"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var (
+ // GardenScheme is the scheme used in the Garden cluster.
+ GardenScheme = runtime.NewScheme()
+ // SeedScheme is the scheme used in the Seed cluster.
+ SeedScheme = runtime.NewScheme()
+ // ShootScheme is the scheme used in the Shoot cluster.
+ ShootScheme = runtime.NewScheme()
+ // PlantScheme is the scheme used in the Plant cluster
+ PlantScheme = runtime.NewScheme()
+
+ // DefaultDeleteOptions use foreground propagation policy and grace period of 60 seconds.
+ DefaultDeleteOptions = []client.DeleteOption{
+ client.PropagationPolicy(metav1.DeletePropagationForeground),
+ client.GracePeriodSeconds(60),
+ }
+ // ForceDeleteOptions use background propagation policy and grace period of 0 seconds.
+ ForceDeleteOptions = []client.DeleteOption{
+ client.PropagationPolicy(metav1.DeletePropagationBackground),
+ client.GracePeriodSeconds(0),
+ }
+
+ // SeedSerializer is a YAML serializer using the Seed scheme.
+ SeedSerializer = json.NewSerializerWithOptions(json.DefaultMetaFactory, SeedScheme, SeedScheme, json.SerializerOptions{Yaml: true, Pretty: false, Strict: false})
+ // SeedCodec is a codec factory using the Seed scheme.
+ SeedCodec = serializer.NewCodecFactory(SeedScheme)
+
+ // ShootSerializer is a YAML serializer using the Shoot scheme.
+ ShootSerializer = json.NewSerializerWithOptions(json.DefaultMetaFactory, ShootScheme, ShootScheme, json.SerializerOptions{Yaml: true, Pretty: false, Strict: false})
+ // ShootCodec is a codec factory using the Shoot scheme.
+ ShootCodec = serializer.NewCodecFactory(ShootScheme)
+)
+
+// DefaultGetOptions are the default options for GET requests.
+func DefaultGetOptions() metav1.GetOptions { return metav1.GetOptions{} }
+
+// DefaultCreateOptions are the default options for CREATE requests.
+func DefaultCreateOptions() metav1.CreateOptions { return metav1.CreateOptions{} }
+
+// DefaultUpdateOptions are the default options for UPDATE requests.
+func DefaultUpdateOptions() metav1.UpdateOptions { return metav1.UpdateOptions{} }
+
+func init() {
+ gardenSchemeBuilder := runtime.NewSchemeBuilder(
+ corescheme.AddToScheme,
+ gardencorescheme.AddToScheme,
+ )
+ utilruntime.Must(gardenSchemeBuilder.AddToScheme(GardenScheme))
+
+ seedSchemeBuilder := runtime.NewSchemeBuilder(
+ corescheme.AddToScheme,
+ dnsv1alpha1.AddToScheme,
+ gardenextensionsscheme.AddToScheme,
+ resourcesscheme.AddToScheme,
+ autoscalingscheme.AddToScheme,
+ hvpav1alpha1.AddToScheme,
+ druidv1alpha1.AddToScheme,
+ apiextensionsscheme.AddToScheme,
+ )
+ utilruntime.Must(seedSchemeBuilder.AddToScheme(SeedScheme))
+
+ shootSchemeBuilder := runtime.NewSchemeBuilder(
+ corescheme.AddToScheme,
+ apiextensionsscheme.AddToScheme,
+ apiregistrationscheme.AddToScheme,
+ autoscalingscheme.AddToScheme,
+ )
+ utilruntime.Must(shootSchemeBuilder.AddToScheme(ShootScheme))
+
+ plantSchemeBuilder := runtime.NewSchemeBuilder(
+ corescheme.AddToScheme,
+ gardencorescheme.AddToScheme,
+ )
+ utilruntime.Must(plantSchemeBuilder.AddToScheme(PlantScheme))
+}
+
+// MergeFunc determines how oldObj is merged into newObj.
+type MergeFunc func(newObj, oldObj *unstructured.Unstructured)
+
+// Applier is an interface which describes declarative operations to apply multiple
+// Kubernetes objects.
+type Applier interface {
+ ApplyManifest(ctx context.Context, unstructured UnstructuredReader, options MergeFuncs) error
+ DeleteManifest(ctx context.Context, unstructured UnstructuredReader, opts ...DeleteManifestOption) error
+}
+
+// Interface is used to wrap the interactions with a Kubernetes cluster
+// (which are performed with the help of kubernetes/client-go) in order to allow the implementation
+// of several Kubernetes versions.
+type Interface interface {
+ RESTConfig() *rest.Config
+ RESTMapper() meta.RESTMapper
+ RESTClient() rest.Interface
+
+ // Client returns the ClientSet's controller-runtime client. This client should be used by default, as it carries
+ // a cache, which uses SharedIndexInformers to keep up-to-date.
+ Client() client.Client
+ // DirectClient returns a controller-runtime client, which can be used to talk to the API server directly
+ // (without using a cache).
+ DirectClient() client.Client
+ // Cache returns the ClientSet's controller-runtime cache. It can be used to get Informers for arbitrary objects.
+ Cache() cache.Cache
+
+ // Applier returns an Applier which uses the ClientSet's client.
+ Applier() Applier
+ // ChartRenderer returns a ChartRenderer populated with the cluster's Capabilities.
+ ChartRenderer() chartrenderer.Interface
+ // ChartApplier returns a ChartApplier using the ClientSet's ChartRenderer and Applier.
+ ChartApplier() ChartApplier
+
+ Kubernetes() kubernetesclientset.Interface
+ GardenCore() gardencoreclientset.Interface
+ APIExtension() apiextensionsclientset.Interface
+ APIRegistration() apiregistrationclientset.Interface
+
+ // Deprecated: Use `Client()` and utils instead.
+ ForwardPodPort(string, string, int, int) (chan struct{}, error)
+ CheckForwardPodPort(string, string, int, int) error
+
+ // Version returns the server version of the targeted Kubernetes cluster.
+ Version() string
+ // DiscoverVersion tries to retrieve the server version of the targeted Kubernetes cluster and updates the
+ // ClientSet's saved version accordingly. Use Version if you only want to retrieve the kubernetes version instead
+ // of refreshing the ClientSet's saved version.
+ DiscoverVersion() (*version.Info, error)
+
+ // Start starts the cache of the ClientSet's controller-runtime client and returns immediately.
+ // It must be called before using the client to retrieve objects from the API server.
+ Start(ctx context.Context)
+ // WaitForCacheSync waits for the cache of the ClientSet's controller-runtime client to be synced.
+ WaitForCacheSync(ctx context.Context) bool
+}
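
The DefaultDeleteOptions and ForceDeleteOptions variables above are plain controller-runtime delete options; a brief sketch of passing them through (illustrative only; the target object is an arbitrary example):

```go
package example

import (
	"context"

	"github.com/gardener/gardener/pkg/client/kubernetes"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteGracefully deletes a deployment with foreground propagation and a
// 60-second grace period, i.e. the package's DefaultDeleteOptions.
func deleteGracefully(ctx context.Context, c client.Client, namespace, name string) error {
	deploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}
	return client.IgnoreNotFound(c.Delete(ctx, deploy, kubernetes.DefaultDeleteOptions...))
}
```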
diff --git a/vendor/github.com/gardener/gardener/pkg/controllerutils/associations.go b/vendor/github.com/gardener/gardener/pkg/controllerutils/associations.go
new file mode 100644
index 0000000..29cadc6
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/controllerutils/associations.go
@@ -0,0 +1,163 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllerutils
+
+import (
+ "context"
+ "fmt"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ gardencorelisters "github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1"
+ "github.com/gardener/gardener/pkg/logger"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// DetermineShootsAssociatedTo uses the given lister to determine the Shoot resources which are associated
+// with the given object (either a CloudProfile, a Seed, or a SecretBinding object).
+func DetermineShootsAssociatedTo(obj interface{}, shootLister gardencorelisters.ShootLister) ([]string, error) {
+ var associatedShoots []string
+ shoots, err := shootLister.List(labels.Everything())
+ if err != nil {
+ logger.Logger.Info(err.Error())
+ return nil, err
+ }
+
+ for _, shoot := range shoots {
+ switch t := obj.(type) {
+ case *gardencorev1beta1.CloudProfile:
+ cloudProfile := obj.(*gardencorev1beta1.CloudProfile)
+ if shoot.Spec.CloudProfileName == cloudProfile.Name {
+ associatedShoots = append(associatedShoots, fmt.Sprintf("%s/%s", shoot.Namespace, shoot.Name))
+ }
+ case *gardencorev1beta1.Seed:
+ seed := obj.(*gardencorev1beta1.Seed)
+ if shoot.Spec.SeedName != nil && *shoot.Spec.SeedName == seed.Name {
+ associatedShoots = append(associatedShoots, fmt.Sprintf("%s/%s", shoot.Namespace, shoot.Name))
+ }
+ case *gardencorev1beta1.SecretBinding:
+ binding := obj.(*gardencorev1beta1.SecretBinding)
+ if shoot.Spec.SecretBindingName == binding.Name && shoot.Namespace == binding.Namespace {
+ associatedShoots = append(associatedShoots, fmt.Sprintf("%s/%s", shoot.Namespace, shoot.Name))
+ }
+ default:
+ return nil, fmt.Errorf("unable to determine Shoot associations, due to unknown type %t", t)
+ }
+ }
+ return associatedShoots, nil
+}
+
+// DetermineSecretBindingAssociations uses the given lister to determine the SecretBinding
+// resources which are associated with the given Quota.
+func DetermineSecretBindingAssociations(quota *gardencorev1beta1.Quota, bindingLister gardencorelisters.SecretBindingLister) ([]string, error) {
+ var associatedBindings []string
+ bindings, err := bindingLister.List(labels.Everything())
+ if err != nil {
+ return nil, err
+ }
+
+ for _, binding := range bindings {
+ for _, quotaRef := range binding.Quotas {
+ if quotaRef.Name == quota.Name && quotaRef.Namespace == quota.Namespace {
+ associatedBindings = append(associatedBindings, fmt.Sprintf("%s/%s", binding.Namespace, binding.Name))
+ }
+ }
+ }
+ return associatedBindings, nil
+}
+
+// DetermineBackupBucketAssociations determines the BackupBucket resources which are associated
+// with the seed with the given name.
+func DetermineBackupBucketAssociations(ctx context.Context, c client.Client, seedName string) ([]string, error) {
+ return determineAssociations(ctx, c, seedName, &gardencorev1beta1.BackupBucketList{}, func(o runtime.Object) (string, error) {
+ backupBucket, ok := o.(*gardencorev1beta1.BackupBucket)
+ if !ok {
+ return "", fmt.Errorf("got unexpected object when expecting BackupBucket")
+ }
+ if backupBucket.Spec.SeedName == nil {
+ return "", nil
+ }
+ return *backupBucket.Spec.SeedName, nil
+ })
+}
+
+// DetermineBackupEntryAssociations determines the BackupEntry resources which are associated
+// with the seed with the given name.
+func DetermineBackupEntryAssociations(ctx context.Context, c client.Client, seedName string) ([]string, error) {
+ return determineAssociations(ctx, c, seedName, &gardencorev1beta1.BackupEntryList{}, func(o runtime.Object) (string, error) {
+ backupEntry, ok := o.(*gardencorev1beta1.BackupEntry)
+ if !ok {
+ return "", fmt.Errorf("got unexpected object when expecting BackupEntry")
+ }
+ if backupEntry.Spec.SeedName == nil {
+ return "", nil
+ }
+ return *backupEntry.Spec.SeedName, nil
+ })
+}
+
+// DetermineControllerInstallationAssociations determines the ControllerInstallation resources which are associated
+// with the seed with the given name.
+func DetermineControllerInstallationAssociations(ctx context.Context, c client.Client, seedName string) ([]string, error) {
+ return determineAssociations(ctx, c, seedName, &gardencorev1beta1.ControllerInstallationList{}, func(o runtime.Object) (string, error) {
+ controllerInstallation, ok := o.(*gardencorev1beta1.ControllerInstallation)
+ if !ok {
+ return "", fmt.Errorf("got unexpected object when expecting ControllerInstallation")
+ }
+ return controllerInstallation.Spec.SeedRef.Name, nil
+ })
+}
+
+// DetermineShootAssociations determines the Shoot resources which are associated
+// with the seed with the given name.
+func DetermineShootAssociations(ctx context.Context, c client.Client, seedName string) ([]string, error) {
+ return determineAssociations(ctx, c, seedName, &gardencorev1beta1.ShootList{}, func(o runtime.Object) (string, error) {
+ shoot, ok := o.(*gardencorev1beta1.Shoot)
+ if !ok {
+ return "", fmt.Errorf("got unexpected object when expecting Shoot")
+ }
+ if shoot.Spec.SeedName == nil {
+ return "", nil
+ }
+ return *shoot.Spec.SeedName, nil
+ })
+}
+
+func determineAssociations(ctx context.Context, c client.Client, seedName string, listObj client.ObjectList, seedNameFunc func(runtime.Object) (string, error)) ([]string, error) {
+ if err := c.List(ctx, listObj); err != nil {
+ return nil, err
+ }
+
+ var associations []string
+ err := meta.EachListItem(listObj, func(obj runtime.Object) error {
+ name, err := seedNameFunc(obj)
+ if err != nil {
+ return err
+ }
+
+ if name == seedName {
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ return err
+ }
+ associations = append(associations, accessor.GetName())
+ }
+ return nil
+ })
+ return associations, err
+}
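
A sketch of how such association helpers are commonly used, e.g. to block seed deletion while Shoots are still scheduled onto it (illustrative; not taken from the upstream code):

```go
package example

import (
	"context"
	"fmt"

	"github.com/gardener/gardener/pkg/controllerutils"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// ensureSeedHasNoShoots refuses to proceed while Shoots are still scheduled
// onto the given seed.
func ensureSeedHasNoShoots(ctx context.Context, c client.Client, seedName string) error {
	shoots, err := controllerutils.DetermineShootAssociations(ctx, c, seedName)
	if err != nil {
		return err
	}
	if len(shoots) > 0 {
		return fmt.Errorf("seed %q still has associated shoots: %v", seedName, shoots)
	}
	return nil
}
```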
diff --git a/vendor/github.com/gardener/gardener/pkg/controllerutils/finalizers.go b/vendor/github.com/gardener/gardener/pkg/controllerutils/finalizers.go
new file mode 100644
index 0000000..8ef7253
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/controllerutils/finalizers.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllerutils
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/util/retry"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// PatchFinalizers adds the given finalizers to the object via a patch request.
+func PatchFinalizers(ctx context.Context, c client.Client, obj client.Object, finalizers ...string) error {
+ beforePatch := obj.DeepCopyObject()
+
+ for _, finalizer := range finalizers {
+ controllerutil.AddFinalizer(obj, finalizer)
+ }
+
+ return c.Patch(ctx, obj, client.MergeFromWithOptions(beforePatch, client.MergeFromWithOptimisticLock{}))
+}
+
+// PatchRemoveFinalizers removes the given finalizers from the object via a patch request.
+func PatchRemoveFinalizers(ctx context.Context, c client.Client, obj client.Object, finalizers ...string) error {
+ beforePatch := obj.DeepCopyObject()
+
+ for _, finalizer := range finalizers {
+ controllerutil.RemoveFinalizer(obj, finalizer)
+ }
+
+ return c.Patch(ctx, obj, client.MergeFromWithOptions(beforePatch, client.MergeFromWithOptimisticLock{}))
+}
+
+// EnsureFinalizer ensures that the given finalizer is present on the object.
+func EnsureFinalizer(ctx context.Context, c client.Client, obj client.Object, finalizer string) error {
+ if err := kutil.TryUpdate(ctx, retry.DefaultBackoff, c, obj, func() error {
+ controllerutil.AddFinalizer(obj, finalizer)
+ return nil
+ }); err != nil {
+ return fmt.Errorf("could not ensure %q finalizer: %+v", finalizer, err)
+ }
+ return nil
+}
+
+// RemoveGardenerFinalizer removes the gardener finalizer from the object.
+func RemoveGardenerFinalizer(ctx context.Context, c client.Client, obj client.Object) error {
+ return RemoveFinalizer(ctx, c, obj, gardencorev1beta1.GardenerName)
+}
+
+// RemoveFinalizer removes the given finalizer from the object.
+func RemoveFinalizer(ctx context.Context, c client.Client, obj client.Object, finalizer string) error {
+ if err := kutil.TryUpdate(ctx, retry.DefaultBackoff, c, obj, func() error {
+ controllerutil.RemoveFinalizer(obj, finalizer)
+ return nil
+ }); client.IgnoreNotFound(err) != nil {
+ return fmt.Errorf("could not remove %q finalizer: %+v", finalizer, err)
+ }
+
+ // Wait until the above modifications are reflected in the cache to prevent unwanted reconcile
+ // operations (sometimes the cache is not synced fast enough).
+ pollerCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
+ defer cancel()
+ return wait.PollImmediateUntil(time.Second, func() (bool, error) {
+ err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj)
+ if apierrors.IsNotFound(err) {
+ return true, nil
+ }
+ if err != nil {
+ return false, err
+ }
+ if !controllerutil.ContainsFinalizer(obj, finalizer) {
+ return true, nil
+ }
+ return false, nil
+ }, pollerCtx.Done())
+}
+
+// HasFinalizer checks whether the given obj has the given finalizer.
+// Deprecated: use controllerutil.ContainsFinalizer instead
+var HasFinalizer = controllerutil.ContainsFinalizer
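
A sketch of the usual reconcile-time finalizer flow built from these helpers (illustrative; the cleanup callback and its placement are assumed placeholders, not the upstream controllers' exact flow):

```go
package example

import (
	"context"

	"github.com/gardener/gardener/pkg/controllerutils"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// reconcileFinalizer ensures the finalizer while the object is alive and
// removes it only after the cleanup callback has succeeded during deletion.
func reconcileFinalizer(ctx context.Context, c client.Client, obj client.Object, finalizer string, cleanup func(context.Context) error) error {
	if obj.GetDeletionTimestamp() == nil {
		return controllerutils.EnsureFinalizer(ctx, c, obj, finalizer)
	}
	if err := cleanup(ctx); err != nil {
		return err
	}
	return controllerutils.RemoveFinalizer(ctx, c, obj, finalizer)
}
```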
diff --git a/vendor/github.com/gardener/gardener/pkg/controllerutils/miscellaneous.go b/vendor/github.com/gardener/gardener/pkg/controllerutils/miscellaneous.go
new file mode 100644
index 0000000..486bcc0
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/controllerutils/miscellaneous.go
@@ -0,0 +1,122 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllerutils
+
+import (
+ "strings"
+ "time"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ "github.com/gardener/gardener/pkg/operation/common"
+ "github.com/gardener/gardener/pkg/utils"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const separator = ","
+
+// GetTasks returns the list of tasks in the ShootTasks annotation.
+func GetTasks(annotations map[string]string) []string {
+ var tasks []string
+ if val := annotations[common.ShootTasks]; len(val) > 0 {
+ tasks = strings.Split(val, separator)
+ }
+ return tasks
+}
+
+// HasTask checks if the passed task is part of the ShootTasks annotation.
+func HasTask(annotations map[string]string, task string) bool {
+ tasks := GetTasks(annotations)
+ if len(tasks) == 0 {
+ return false
+ }
+ return utils.ValueExists(task, tasks)
+}
+
+// AddTasks adds tasks to the ShootTasks annotation of the passed map.
+func AddTasks(annotations map[string]string, tasksToAdd ...string) {
+ tasks := GetTasks(annotations)
+
+ for _, taskToAdd := range tasksToAdd {
+ if !utils.ValueExists(taskToAdd, tasks) {
+ tasks = append(tasks, taskToAdd)
+ }
+ }
+
+ setTaskAnnotations(annotations, tasks)
+}
+
+// RemoveTasks removes tasks from the ShootTasks annotation of the passed map.
+func RemoveTasks(annotations map[string]string, tasksToRemove ...string) {
+ tasks := GetTasks(annotations)
+
+ for i := len(tasks) - 1; i >= 0; i-- {
+ if utils.ValueExists(tasks[i], tasksToRemove) {
+ tasks = append((tasks)[:i], (tasks)[i+1:]...)
+ }
+ }
+
+ setTaskAnnotations(annotations, tasks)
+}
+
+// RemoveAllTasks removes the ShootTasks annotation from the passed map.
+func RemoveAllTasks(annotations map[string]string) {
+ delete(annotations, common.ShootTasks)
+}
+
+func setTaskAnnotations(annotations map[string]string, tasks []string) {
+ if len(tasks) == 0 {
+ RemoveAllTasks(annotations)
+ return
+ }
+
+ annotations[common.ShootTasks] = strings.Join(tasks, separator)
+}
+
+var (
+ // Now is a function for returning the current time.
+ Now = time.Now
+ // RandomDuration is a function for returning a random duration.
+ RandomDuration = utils.RandomDuration
+)
+
+// ReconcileOncePer24hDuration returns the duration until the next reconciliation should happen while respecting that
+// only one reconciliation should happen per 24h. If the deletion timestamp is set or the generation has changed or the
+// last operation does not indicate success or indicates that the last reconciliation happened more than 24h ago then 0
+// will be returned.
+func ReconcileOncePer24hDuration(objectMeta metav1.ObjectMeta, observedGeneration int64, lastOperation *gardencorev1beta1.LastOperation) time.Duration {
+ if objectMeta.DeletionTimestamp != nil {
+ return 0
+ }
+
+ if objectMeta.Generation != observedGeneration {
+ return 0
+ }
+
+ if lastOperation == nil ||
+ lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded ||
+ (lastOperation.Type != gardencorev1beta1.LastOperationTypeCreate && lastOperation.Type != gardencorev1beta1.LastOperationTypeReconcile) {
+ return 0
+ }
+
+ // If last reconciliation happened more than 24h ago then we want to reconcile immediately, so let's only compute
+ // a delay if the last reconciliation was within the last 24h.
+ if lastReconciliation := lastOperation.LastUpdateTime.Time; Now().UTC().Before(lastReconciliation.UTC().Add(24 * time.Hour)) {
+ durationUntilLastReconciliationWas24hAgo := lastReconciliation.UTC().Add(24 * time.Hour).Sub(Now().UTC())
+ return RandomDuration(durationUntilLastReconciliationWas24hAgo)
+ }
+
+ return 0
+}
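
A sketch of how ReconcileOncePer24hDuration typically feeds a requeue decision in a reconciler (illustrative; the Shoot status fields are assumed from the gardencorev1beta1 types vendored elsewhere in this patch):

```go
package example

import (
	"context"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	"github.com/gardener/gardener/pkg/controllerutils"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// reconcileShoot requeues the shoot for later if it was already reconciled
// successfully within the last 24 hours.
func reconcileShoot(ctx context.Context, c client.Client, req reconcile.Request) (reconcile.Result, error) {
	shoot := &gardencorev1beta1.Shoot{}
	if err := c.Get(ctx, req.NamespacedName, shoot); err != nil {
		return reconcile.Result{}, client.IgnoreNotFound(err)
	}

	if d := controllerutils.ReconcileOncePer24hDuration(shoot.ObjectMeta, shoot.Status.ObservedGeneration, shoot.Status.LastOperation); d > 0 {
		return reconcile.Result{RequeueAfter: d}, nil
	}

	// ... the actual reconciliation work would go here ...
	return reconcile.Result{}, nil
}
```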
diff --git a/vendor/github.com/gardener/gardener/pkg/controllerutils/operations.go b/vendor/github.com/gardener/gardener/pkg/controllerutils/operations.go
new file mode 100644
index 0000000..941b0fb
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/controllerutils/operations.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllerutils
+
+import (
+ "context"
+
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
+
+ "k8s.io/apimachinery/pkg/util/wait"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// RemoveGardenerOperationAnnotation removes the gardener operation annotation from the object and retries the update with the given backoff.
+func RemoveGardenerOperationAnnotation(ctx context.Context, backoff wait.Backoff, cli client.Client, obj client.Object) error {
+ return kutil.TryUpdate(ctx, backoff, cli, obj, func() error {
+ delete(obj.GetAnnotations(), v1beta1constants.GardenerOperation)
+ return nil
+ })
+}
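
A one-line usage sketch with client-go's default backoff (illustrative only):

```go
package example

import (
	"context"

	"github.com/gardener/gardener/pkg/controllerutils"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// clearOperationAnnotation removes the gardener operation annotation with
// client-go's default backoff once the requested operation has been picked up.
func clearOperationAnnotation(ctx context.Context, c client.Client, obj client.Object) error {
	return controllerutils.RemoveGardenerOperationAnnotation(ctx, retry.DefaultBackoff, c, obj)
}
```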
diff --git a/vendor/github.com/gardener/gardener/pkg/controllerutils/pointers.go b/vendor/github.com/gardener/gardener/pkg/controllerutils/pointers.go
new file mode 100644
index 0000000..ed643a4
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/controllerutils/pointers.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllerutils
+
+// BoolPtrDerefOr dereferences the given bool if it's non-nil. Otherwise, returns the default.
+func BoolPtrDerefOr(b *bool, defaultValue bool) bool {
+ if b == nil {
+ return defaultValue
+ }
+ return *b
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/controllerutils/seedfilter.go b/vendor/github.com/gardener/gardener/pkg/controllerutils/seedfilter.go
new file mode 100644
index 0000000..751611e
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/controllerutils/seedfilter.go
@@ -0,0 +1,154 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllerutils
+
+import (
+ "context"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ gardencorelisters "github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1"
+ "github.com/gardener/gardener/pkg/gardenlet/apis/config"
+ confighelper "github.com/gardener/gardener/pkg/gardenlet/apis/config/helper"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// LabelsMatchFor checks whether the given label selector matches for the given set of labels.
+func LabelsMatchFor(l map[string]string, labelSelector *metav1.LabelSelector) bool {
+ selector, err := metav1.LabelSelectorAsSelector(labelSelector)
+ if err != nil {
+ return false
+ }
+ return selector.Matches(labels.Set(l))
+}
+
+// SeedFilterFunc returns a filtering func for Seeds, based on the given seed name and label selector.
+func SeedFilterFunc(seedName string, labelSelector *metav1.LabelSelector) func(obj interface{}) bool {
+ return func(obj interface{}) bool {
+ seed, ok := obj.(*gardencorev1beta1.Seed)
+ if !ok {
+ return false
+ }
+ if len(seedName) > 0 {
+ return seed.Name == seedName
+ }
+ return LabelsMatchFor(seed.Labels, labelSelector)
+ }
+}
+
+// ShootFilterFunc returns a filtering func for Shoots, based on the given seed name, seed lister, and label selector.
+func ShootFilterFunc(seedName string, seedLister gardencorelisters.SeedLister, labelSelector *metav1.LabelSelector) func(obj interface{}) bool {
+ return func(obj interface{}) bool {
+ shoot, ok := obj.(*gardencorev1beta1.Shoot)
+ if !ok {
+ return false
+ }
+ if shoot.Spec.SeedName == nil {
+ return false
+ }
+ if len(seedName) > 0 {
+ if shoot.Status.SeedName == nil || *shoot.Spec.SeedName == *shoot.Status.SeedName {
+ return *shoot.Spec.SeedName == seedName
+ }
+ return *shoot.Status.SeedName == seedName
+ }
+ if shoot.Status.SeedName == nil || *shoot.Spec.SeedName == *shoot.Status.SeedName {
+ return SeedLabelsMatch(seedLister, *shoot.Spec.SeedName, labelSelector)
+ }
+ return SeedLabelsMatch(seedLister, *shoot.Status.SeedName, labelSelector)
+ }
+}
+
+// ShootIsManagedByThisGardenlet checks if the given shoot is managed by this gardenlet by comparing it with the seed name from the GardenletConfiguration,
+// or by checking whether the seed labels match the seed selector from the GardenletConfiguration.
+func ShootIsManagedByThisGardenlet(shoot *gardencorev1beta1.Shoot, gc *config.GardenletConfiguration, seedLister gardencorelisters.SeedLister) bool {
+ seedName := confighelper.SeedNameFromSeedConfig(gc.SeedConfig)
+ if len(seedName) > 0 {
+ return *shoot.Spec.SeedName == seedName
+ }
+ return SeedLabelsMatch(seedLister, *shoot.Spec.SeedName, gc.SeedSelector)
+}
+
+// SeedLabelsMatch fetches the given seed via a lister by its name and then checks whether the given label selector matches
+// the seed labels.
+func SeedLabelsMatch(seedLister gardencorelisters.SeedLister, seedName string, labelSelector *metav1.LabelSelector) bool {
+ seed, err := seedLister.Get(seedName)
+ if err != nil {
+ return false
+ }
+
+ return LabelsMatchFor(seed.Labels, labelSelector)
+}
+
+// seedLabelsMatchWithClient fetches the given seed by its name from the client and then checks whether the given
+// label selector matches the seed labels.
+func seedLabelsMatchWithClient(ctx context.Context, c client.Client, seedName string, labelSelector *metav1.LabelSelector) bool {
+ seed := &gardencorev1beta1.Seed{}
+ if err := c.Get(ctx, client.ObjectKey{Name: seedName}, seed); err != nil {
+ return false
+ }
+
+ return LabelsMatchFor(seed.Labels, labelSelector)
+}
+
+// ControllerInstallationFilterFunc returns a filtering func for ControllerInstallations, based on the given seed name and label selector.
+func ControllerInstallationFilterFunc(seedName string, seedLister gardencorelisters.SeedLister, labelSelector *metav1.LabelSelector) func(obj interface{}) bool {
+ return func(obj interface{}) bool {
+ controllerInstallation, ok := obj.(*gardencorev1beta1.ControllerInstallation)
+ if !ok {
+ return false
+ }
+ if len(seedName) > 0 {
+ return controllerInstallation.Spec.SeedRef.Name == seedName
+ }
+ return SeedLabelsMatch(seedLister, controllerInstallation.Spec.SeedRef.Name, labelSelector)
+ }
+}
+
+// BackupBucketFilterFunc returns a filtering func for BackupBuckets, based on the given seed name and label selector.
+func BackupBucketFilterFunc(ctx context.Context, c client.Client, seedName string, labelSelector *metav1.LabelSelector) func(obj interface{}) bool {
+ return func(obj interface{}) bool {
+ backupBucket, ok := obj.(*gardencorev1beta1.BackupBucket)
+ if !ok {
+ return false
+ }
+ if backupBucket.Spec.SeedName == nil {
+ return false
+ }
+ if len(seedName) > 0 {
+ return *backupBucket.Spec.SeedName == seedName
+ }
+ return seedLabelsMatchWithClient(ctx, c, *backupBucket.Spec.SeedName, labelSelector)
+ }
+}
+
+// BackupEntryFilterFunc returns a filtering func for BackupEntries, based on the given seed name and label selector.
+func BackupEntryFilterFunc(ctx context.Context, c client.Client, seedName string, labelSelector *metav1.LabelSelector) func(obj interface{}) bool {
+ return func(obj interface{}) bool {
+ backupEntry, ok := obj.(*gardencorev1beta1.BackupEntry)
+ if !ok {
+ return false
+ }
+ if backupEntry.Spec.SeedName == nil {
+ return false
+ }
+ if len(seedName) > 0 {
+ return *backupEntry.Spec.SeedName == seedName
+ }
+ return seedLabelsMatchWithClient(ctx, c, *backupEntry.Spec.SeedName, labelSelector)
+ }
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/controllerutils/worker.go b/vendor/github.com/gardener/gardener/pkg/controllerutils/worker.go
new file mode 100644
index 0000000..735b5e5
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/controllerutils/worker.go
@@ -0,0 +1,133 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllerutils
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/gardener/gardener/pkg/logger"
+ kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
+
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/util/workqueue"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/runtime/inject"
+)
+
+// DeprecatedCreateWorker creates and runs a worker thread that just processes items in the
+// specified queue. The worker will run until stopCh is closed. The worker will be
+// added to the wait group when started and marked done when finished.
+// Deprecated: Use CreateWorker instead.
+func DeprecatedCreateWorker(ctx context.Context, queue workqueue.RateLimitingInterface, resourceType string, reconciler func(key string) error, waitGroup *sync.WaitGroup, workerCh chan int) {
+ CreateWorker(ctx, queue, resourceType, reconcile.Func(func(_ context.Context, req reconcile.Request) (reconcile.Result, error) {
+ meta := kutil.ObjectMeta(req.Namespace, req.Name)
+ key, err := cache.MetaNamespaceKeyFunc(&meta)
+ if err != nil {
+ logger.Logger.WithError(err).Error("Could not create key from meta")
+ return reconcile.Result{}, nil
+ }
+
+ return reconcile.Result{}, reconciler(key)
+ }), waitGroup, workerCh)
+}
+
+// CreateWorker creates and runs a worker thread that just processes items in the
+// specified queue. The worker will run until stopCh is closed. The worker will be
+// added to the wait group when started and marked done when finished.
+// The given context is injected into the `reconciler` if it implements `inject.Stoppable`.
+// Any optionally passed inject functions are called with the `reconciler`; errors returned by them are logged but otherwise disregarded.
+func CreateWorker(ctx context.Context, queue workqueue.RateLimitingInterface, resourceType string, reconciler reconcile.Reconciler, waitGroup *sync.WaitGroup, workerCh chan int, injectFn ...inject.Func) {
+ fns := append(injectFn, func(i interface{}) error {
+ _, err := inject.StopChannelInto(ctx.Done(), i)
+ return err
+ })
+
+ for _, f := range fns {
+ if err := f(reconciler); err != nil {
+ logger.Logger.Errorf("An error occurred while reconciler injection: %v", err)
+ }
+ }
+
+ waitGroup.Add(1)
+ workerCh <- 1
+ go func() {
+ wait.UntilWithContext(ctx, func(ctx context.Context) {
+ worker(ctx, queue, resourceType, reconciler)
+ }, time.Second)
+ workerCh <- -1
+ waitGroup.Done()
+ }()
+}
+
+func requestFromKey(key interface{}) (reconcile.Request, error) {
+ switch v := key.(type) {
+ case string:
+ namespace, name, err := cache.SplitMetaNamespaceKey(v)
+ if err != nil {
+ return reconcile.Request{}, err
+ }
+
+ return reconcile.Request{NamespacedName: types.NamespacedName{Namespace: namespace, Name: name}}, nil
+ case reconcile.Request:
+ return v, nil
+ default:
+ return reconcile.Request{}, fmt.Errorf("unknown key type %T", key)
+ }
+}
+
+// worker runs a worker thread that just dequeues items, processes them, and marks them done.
+// It enforces that the reconciler is never invoked concurrently with the same key.
+func worker(ctx context.Context, queue workqueue.RateLimitingInterface, resourceType string, reconciler reconcile.Reconciler) {
+ exit := false
+ for !exit {
+ exit = func() bool {
+ key, quit := queue.Get()
+ if quit {
+ return true
+ }
+ defer queue.Done(key)
+
+ req, err := requestFromKey(key)
+ if err != nil {
+ logger.Logger.WithError(err).Error("Cannot obtain request from key")
+ queue.Forget(key)
+ return false
+ }
+
+ res, err := reconciler.Reconcile(ctx, req)
+ if err != nil {
+ logger.Logger.Infof("Error syncing %s %v: %v", resourceType, key, err)
+ queue.AddRateLimited(key)
+ return false
+ }
+
+ if res.RequeueAfter > 0 {
+ queue.AddAfter(key, res.RequeueAfter)
+ return false
+ }
+ if res.Requeue {
+ queue.AddRateLimited(key)
+ return false
+ }
+ queue.Forget(key)
+ return false
+ }()
+ }
+}
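As a hedged usage sketch (not part of the vendored file), a controller could start several such workers against a shared rate-limited queue roughly as follows; the package name, numWorkers, and the enqueued key are assumptions.

package example

import (
	"context"
	"sync"

	"github.com/gardener/gardener/pkg/controllerutils"

	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// startWorkers spins up numWorkers workers that process keys from a shared
// queue until ctx is cancelled and the queue is shut down.
func startWorkers(ctx context.Context, reconciler reconcile.Reconciler, numWorkers int) {
	var (
		waitGroup sync.WaitGroup
		workerCh  = make(chan int, numWorkers*2)
		queue     = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "example")
	)

	for i := 0; i < numWorkers; i++ {
		controllerutils.CreateWorker(ctx, queue, "Example", reconciler, &waitGroup, workerCh)
	}

	// Keys may be plain "namespace/name" strings or reconcile.Requests.
	queue.Add("garden/my-object")

	<-ctx.Done()     // wait for the stop signal
	queue.ShutDown() // lets the workers leave their Get() loop
	waitGroup.Wait()
}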
diff --git a/vendor/github.com/gardener/gardener/pkg/extensions/cluster.go b/vendor/github.com/gardener/gardener/pkg/extensions/cluster.go
new file mode 100644
index 0000000..5619893
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/extensions/cluster.go
@@ -0,0 +1,153 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package extensions
+
+import (
+ "context"
+
+ "github.com/gardener/gardener/pkg/apis/core"
+ gardencoreinstall "github.com/gardener/gardener/pkg/apis/core/install"
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+ kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var gardenscheme *runtime.Scheme
+
+func init() {
+ gardenscheme = runtime.NewScheme()
+ gardencoreinstall.Install(gardenscheme)
+}
+
+// Cluster contains the decoded resources of Gardener's extension Cluster resource.
+type Cluster struct {
+ ObjectMeta metav1.ObjectMeta
+ CloudProfile *gardencorev1beta1.CloudProfile
+ Seed *gardencorev1beta1.Seed
+ Shoot *gardencorev1beta1.Shoot
+}
+
+// GetCluster tries to read Gardener's Cluster extension resource in the given namespace.
+func GetCluster(ctx context.Context, c client.Client, namespace string) (*Cluster, error) {
+ cluster := &extensionsv1alpha1.Cluster{}
+ if err := c.Get(ctx, kutil.Key(namespace), cluster); err != nil {
+ return nil, err
+ }
+
+ decoder, err := NewGardenDecoder()
+ if err != nil {
+ return nil, err
+ }
+
+ cloudProfile, err := CloudProfileFromCluster(decoder, cluster)
+ if err != nil {
+ return nil, err
+ }
+ seed, err := SeedFromCluster(decoder, cluster)
+ if err != nil {
+ return nil, err
+ }
+ shoot, err := ShootFromCluster(decoder, cluster)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Cluster{cluster.ObjectMeta, cloudProfile, seed, shoot}, nil
+}
+
+// CloudProfileFromCluster returns the CloudProfile resource inside the Cluster resource.
+func CloudProfileFromCluster(decoder runtime.Decoder, cluster *extensionsv1alpha1.Cluster) (*gardencorev1beta1.CloudProfile, error) {
+ var (
+ cloudProfileInternal = &core.CloudProfile{}
+ cloudProfile = &gardencorev1beta1.CloudProfile{}
+ )
+
+ if cluster.Spec.CloudProfile.Raw == nil {
+ return nil, nil
+ }
+ if _, _, err := decoder.Decode(cluster.Spec.CloudProfile.Raw, nil, cloudProfileInternal); err != nil {
+ return nil, err
+ }
+ if err := gardenscheme.Convert(cloudProfileInternal, cloudProfile, nil); err != nil {
+ return nil, err
+ }
+
+ return cloudProfile, nil
+}
+
+// SeedFromCluster returns the Seed resource inside the Cluster resource.
+func SeedFromCluster(decoder runtime.Decoder, cluster *extensionsv1alpha1.Cluster) (*gardencorev1beta1.Seed, error) {
+ var (
+ seedInternal = &core.Seed{}
+ seed = &gardencorev1beta1.Seed{}
+ )
+
+ if cluster.Spec.Seed.Raw == nil {
+ return nil, nil
+ }
+ if _, _, err := decoder.Decode(cluster.Spec.Seed.Raw, nil, seedInternal); err != nil {
+ return nil, err
+ }
+ if err := gardenscheme.Convert(seedInternal, seed, nil); err != nil {
+ return nil, err
+ }
+
+ return seed, nil
+}
+
+// ShootFromCluster returns the Shoot resource inside the Cluster resource.
+func ShootFromCluster(decoder runtime.Decoder, cluster *extensionsv1alpha1.Cluster) (*gardencorev1beta1.Shoot, error) {
+ var (
+ shootInternal = &core.Shoot{}
+ shoot = &gardencorev1beta1.Shoot{}
+ )
+
+ if cluster.Spec.Shoot.Raw == nil {
+ return nil, nil
+ }
+ if _, _, err := decoder.Decode(cluster.Spec.Shoot.Raw, nil, shootInternal); err != nil {
+ return nil, err
+ }
+ if err := gardenscheme.Convert(shootInternal, shoot, nil); err != nil {
+ return nil, err
+ }
+
+ return shoot, nil
+}
+
+// GetShoot tries to read Gardener's Cluster extension resource in the given namespace and return the embedded Shoot resource.
+func GetShoot(ctx context.Context, c client.Client, namespace string) (*gardencorev1beta1.Shoot, error) {
+ cluster := &extensionsv1alpha1.Cluster{}
+ if err := c.Get(ctx, kutil.Key(namespace), cluster); err != nil {
+ return nil, err
+ }
+
+ decoder, err := NewGardenDecoder()
+ if err != nil {
+ return nil, err
+ }
+
+ return ShootFromCluster(decoder, cluster)
+}
+
+// NewGardenDecoder returns a new Garden API decoder.
+func NewGardenDecoder() (runtime.Decoder, error) {
+ return serializer.NewCodecFactory(gardenscheme).UniversalDecoder(), nil
+}
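As a hedged sketch (not part of the vendored file), an extension controller might use GetCluster to look up the Shoot that belongs to its namespace; the function name printShootVersion and the printed fields are assumptions for illustration.

package example

import (
	"context"
	"fmt"

	"github.com/gardener/gardener/pkg/extensions"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// printShootVersion reads the Cluster resource of the given shoot namespace and
// prints the Kubernetes version of the embedded Shoot.
func printShootVersion(ctx context.Context, c client.Client, namespace string) error {
	cluster, err := extensions.GetCluster(ctx, c, namespace)
	if err != nil {
		return err
	}
	if cluster.Shoot == nil {
		return fmt.Errorf("cluster %q does not carry a shoot", namespace)
	}

	fmt.Printf("shoot %s runs Kubernetes %s\n", cluster.Shoot.Name, cluster.Shoot.Spec.Kubernetes.Version)
	return nil
}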
diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/doc.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/doc.go
new file mode 100644
index 0000000..3e61bbf
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/doc.go
@@ -0,0 +1,18 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +k8s:deepcopy-gen=package
+// +groupName=gardenlet.config.gardener.cloud
+
+package config // import "github.com/gardener/gardener/pkg/gardenlet/apis/config"
diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/helper/helpers.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/helper/helpers.go
new file mode 100644
index 0000000..8505cb2
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/helper/helpers.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helper
+
+import (
+ "github.com/gardener/gardener/pkg/gardenlet/apis/config"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// SeedNameFromSeedConfig returns an empty string if the given seed config is nil, or the
+// name inside the seed config.
+func SeedNameFromSeedConfig(seedConfig *config.SeedConfig) string {
+ if seedConfig == nil {
+ return ""
+ }
+ return seedConfig.Seed.Name
+}
+
+// StaleExtensionHealthChecksThreshold returns nil if the given config is nil or the check
+// for stale health checks is not enabled. Otherwise it returns the threshold from the given config.
+func StaleExtensionHealthChecksThreshold(c *config.StaleExtensionHealthChecks) *metav1.Duration {
+ if c != nil && c.Enabled {
+ return c.Threshold
+ }
+
+ return nil
+}
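A minimal sketch of how these helpers could be consumed, assuming cfg is an already-loaded *config.GardenletConfiguration (the function name and return shape below are illustrative, not part of this patch).

package example

import (
	"time"

	"github.com/gardener/gardener/pkg/gardenlet/apis/config"
	confighelper "github.com/gardener/gardener/pkg/gardenlet/apis/config/helper"
)

// seedNameAndStaleThreshold extracts the configured seed name (empty if no
// SeedConfig is set) and the stale-extension-health-check threshold (zero if
// the check is disabled or unset) from a loaded gardenlet configuration.
func seedNameAndStaleThreshold(cfg *config.GardenletConfiguration) (string, time.Duration) {
	seedName := confighelper.SeedNameFromSeedConfig(cfg.SeedConfig)

	var threshold time.Duration
	if cfg.Controllers != nil && cfg.Controllers.ShootCare != nil {
		if t := confighelper.StaleExtensionHealthChecksThreshold(cfg.Controllers.ShootCare.StaleExtensionHealthChecks); t != nil {
			threshold = t.Duration
		}
	}

	return seedName, threshold
}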
diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/register.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/register.go
new file mode 100644
index 0000000..cc2edeb
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/register.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "gardenlet.config.gardener.cloud"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is used to register the GardenletConfiguration resource.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // AddToScheme is a pointer to SchemeBuilder.AddToScheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &GardenletConfiguration{},
+ )
+ return nil
+}
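As a hedged sketch (not part of the vendored file), a loader could register these internal types into a scheme before decoding a config file; the function name newConfigScheme is an assumption.

package example

import (
	gardenletconfig "github.com/gardener/gardener/pkg/gardenlet/apis/config"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// newConfigScheme builds a scheme that knows the internal GardenletConfiguration
// type, e.g. as a prerequisite for decoding a gardenlet configuration.
func newConfigScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	utilruntime.Must(gardenletconfig.AddToScheme(scheme))
	return scheme
}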
diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/types.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/types.go
new file mode 100644
index 0000000..8a80c67
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/types.go
@@ -0,0 +1,363 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ componentbaseconfig "k8s.io/component-base/config"
+ "k8s.io/klog"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GardenletConfiguration defines the configuration for the Gardenlet.
+type GardenletConfiguration struct {
+ metav1.TypeMeta
+ // GardenClientConnection specifies the kubeconfig file and the client connection settings
+ // for the proxy server to use when communicating with the garden apiserver.
+ GardenClientConnection *GardenClientConnection
+ // SeedClientConnection specifies the client connection settings for the proxy server
+ // to use when communicating with the seed apiserver.
+ SeedClientConnection *SeedClientConnection
+ // ShootClientConnection specifies the client connection settings for the proxy server
+ // to use when communicating with the shoot apiserver.
+ ShootClientConnection *ShootClientConnection
+ // Controllers defines the configuration of the controllers.
+ Controllers *GardenletControllerConfiguration
+ // Resources defines the total capacity for seed resources and the amount reserved for use by Gardener.
+ Resources *ResourcesConfiguration
+ // LeaderElection defines the configuration of leader election client.
+ LeaderElection *LeaderElectionConfiguration
+ // LogLevel is the level/severity for the logs. Must be one of [info,debug,error].
+ LogLevel *string
+ // KubernetesLogLevel is the log level used for Kubernetes' k8s.io/klog functions.
+ KubernetesLogLevel *klog.Level
+ // Server defines the configuration of the HTTP server.
+ Server *ServerConfiguration
+ // FeatureGates is a map of feature names to bools that enable or disable alpha/experimental
+ // features. This field modifies piecemeal the built-in default values from
+ // "github.com/gardener/gardener/pkg/gardenlet/features/features.go".
+ // Default: nil
+ FeatureGates map[string]bool
+ // SeedConfig contains configuration for the seed cluster. Must not be set if seed selector is set.
+ // In this case the gardenlet creates the `Seed` object itself based on the provided config.
+ SeedConfig *SeedConfig
+ // SeedSelector contains an optional list of labels on `Seed` resources that shall be managed by
+ // this gardenlet instance. In this case the `Seed` object is not managed by the Gardenlet and must
+ // be created by an operator/administrator.
+ SeedSelector *metav1.LabelSelector
+ // Logging contains an optional configuration for the logging stack deployed
+ // by the Gardenlet in the seed clusters.
+ Logging *Logging
+ // SNI contains an optional configuration for the APIServerSNI feature used
+ // by the Gardenlet in the seed clusters.
+ SNI *SNI
+}
+
+// GardenClientConnection specifies the kubeconfig file and the client connection settings
+// for the proxy server to use when communicating with the garden apiserver.
+type GardenClientConnection struct {
+ componentbaseconfig.ClientConnectionConfiguration
+ // GardenClusterAddress is the external address that the gardenlets can use to remotely connect to the Garden
+ // cluster. It is needed in case the gardenlet deploys itself into shooted seeds.
+ GardenClusterAddress *string
+ // GardenClusterCACert is the CA certificate that the gardenlets can use to verify the TLS connection to the Garden
+ // cluster. It is needed in case the gardenlet deploys itself into shooted seeds.
+ GardenClusterCACert []byte
+ // BootstrapKubeconfig is a reference to a secret that contains a data key 'kubeconfig' whose value
+ // is a kubeconfig that can be used for bootstrapping. If `kubeconfig` is given then only this kubeconfig
+ // will be considered.
+ BootstrapKubeconfig *corev1.SecretReference
+ // KubeconfigSecret is the reference to a secret object that stores the gardenlet's kubeconfig that
+ // it uses to communicate with the garden cluster. If `kubeconfig` is given then only this kubeconfig
+ // will be considered.
+ KubeconfigSecret *corev1.SecretReference
+}
+
+// SeedClientConnection specifies the client connection settings
+// for the proxy server to use when communicating with the seed apiserver.
+type SeedClientConnection struct {
+ componentbaseconfig.ClientConnectionConfiguration
+}
+
+// ShootClientConnection specifies the client connection settings
+// for the proxy server to use when communicating with the shoot apiserver.
+type ShootClientConnection struct {
+ componentbaseconfig.ClientConnectionConfiguration
+}
+
+// GardenletControllerConfiguration defines the configuration of the controllers.
+type GardenletControllerConfiguration struct {
+ // BackupBucket defines the configuration of the BackupBucket controller.
+ BackupBucket *BackupBucketControllerConfiguration
+ // BackupEntry defines the configuration of the BackupEntry controller.
+ BackupEntry *BackupEntryControllerConfiguration
+ // ControllerInstallation defines the configuration of the ControllerInstallation controller.
+ ControllerInstallation *ControllerInstallationControllerConfiguration
+ // ControllerInstallationCare defines the configuration of the ControllerInstallationCare controller.
+ ControllerInstallationCare *ControllerInstallationCareControllerConfiguration
+ // ControllerInstallationRequired defines the configuration of the ControllerInstallationRequired controller.
+ ControllerInstallationRequired *ControllerInstallationRequiredControllerConfiguration
+ // Seed defines the configuration of the Seed controller.
+ Seed *SeedControllerConfiguration
+ // Shoot defines the configuration of the Shoot controller.
+ Shoot *ShootControllerConfiguration
+ // ShootCare defines the configuration of the ShootCare controller.
+ ShootCare *ShootCareControllerConfiguration
+ // ShootStateSync defines the configuration of the ShootState controller.
+ ShootStateSync *ShootStateSyncControllerConfiguration
+ // ShootedSeedRegistration defines the configuration of the shooted seed registration controller.
+ ShootedSeedRegistration *ShootedSeedRegistrationControllerConfiguration
+ // SeedAPIServerNetworkPolicy defines the configuration of the SeedAPIServerNetworkPolicy controller.
+ SeedAPIServerNetworkPolicy *SeedAPIServerNetworkPolicyControllerConfiguration
+}
+
+// BackupBucketControllerConfiguration defines the configuration of the BackupBucket
+// controller.
+type BackupBucketControllerConfiguration struct {
+ // ConcurrentSyncs is the number of workers used for the controller to work on events.
+ ConcurrentSyncs *int
+}
+
+// BackupEntryControllerConfiguration defines the configuration of the BackupEntry
+// controller.
+type BackupEntryControllerConfiguration struct {
+ // ConcurrentSyncs is the number of workers used for the controller to work on events.
+ ConcurrentSyncs *int
+ // DeletionGracePeriodHours holds the period in number of hours to delete the BackupEntry after the deletion timestamp is set.
+ // If value is set to 0 then the BackupEntryController will trigger deletion immediately.
+ DeletionGracePeriodHours *int
+}
+
+// ControllerInstallationControllerConfiguration defines the configuration of the
+// ControllerInstallation controller.
+type ControllerInstallationControllerConfiguration struct {
+ // ConcurrentSyncs is the number of workers used for the controller to work on
+ // events.
+ ConcurrentSyncs *int
+}
+
+// ControllerInstallationCareControllerConfiguration defines the configuration of the ControllerInstallationCare
+// controller.
+type ControllerInstallationCareControllerConfiguration struct {
+ // ConcurrentSyncs is the number of workers used for the controller to work on
+ // events.
+ ConcurrentSyncs *int
+ // SyncPeriod is the duration how often the existing resources are reconciled (i.e., how
+ // often the health check of ControllerInstallations is performed).
+ SyncPeriod *metav1.Duration
+}
+
+// ControllerInstallationRequiredControllerConfiguration defines the configuration of the ControllerInstallationRequired
+// controller.
+type ControllerInstallationRequiredControllerConfiguration struct {
+ // ConcurrentSyncs is the number of workers used for the controller to work on
+ // events.
+ ConcurrentSyncs *int
+}
+
+// SeedControllerConfiguration defines the configuration of the Seed controller.
+type SeedControllerConfiguration struct {
+ // ConcurrentSyncs is the number of workers used for the controller to work on
+ // events.
+ ConcurrentSyncs *int
+ // SyncPeriod is the duration how often the existing resources are reconciled.
+ SyncPeriod *metav1.Duration
+}
+
+// ShootControllerConfiguration defines the configuration of the Shoot
+// controller.
+type ShootControllerConfiguration struct {
+ // ConcurrentSyncs is the number of workers used for the controller to work on
+ // events.
+ ConcurrentSyncs *int
+ // ProgressReportPeriod is the period how often the progress of a shoot operation will be reported in the
+ // Shoot's `.status.lastOperation` field. By default, the progress will be reported immediately after a task of the
+ // respective flow has been completed. If you set this to a value > 0 (e.g., 5s) then it will be only reported every
+ // 5 seconds. Any tasks that were completed in the meantime will not be reported.
+ ProgressReportPeriod *metav1.Duration
+ // ReconcileInMaintenanceOnly determines whether Shoot reconciliations happen only
+ // during its maintenance time window.
+ ReconcileInMaintenanceOnly *bool
+ // RespectSyncPeriodOverwrite determines whether a sync period overwrite of a
+ // Shoot (via annotation) is respected or not. Defaults to false.
+ RespectSyncPeriodOverwrite *bool
+ // RetryDuration is the maximum duration how often a reconciliation will be retried
+ // in case of errors.
+ RetryDuration *metav1.Duration
+ // SyncPeriod is the duration how often the existing resources are reconciled.
+ SyncPeriod *metav1.Duration
+ // DNSEntryTTLSeconds is the TTL in seconds that is being used for DNS entries when reconciling shoots.
+ // Default: 120s
+ DNSEntryTTLSeconds *int64
+}
+
+// ShootCareControllerConfiguration defines the configuration of the ShootCare
+// controller.
+type ShootCareControllerConfiguration struct {
+ // ConcurrentSyncs is the number of workers used for the controller to work on
+ // events.
+ ConcurrentSyncs *int
+ // SyncPeriod is the duration how often the existing resources are reconciled (i.e., how
+ // often the health check of Shoot clusters is performed, provided that no operation is
+ // already running on them).
+ SyncPeriod *metav1.Duration
+ // StaleExtensionHealthChecks defines the configuration of the check for stale extension health checks.
+ StaleExtensionHealthChecks *StaleExtensionHealthChecks
+ // ConditionThresholds defines the condition threshold per condition type.
+ ConditionThresholds []ConditionThreshold
+}
+
+// StaleExtensionHealthChecks defines the configuration of the check for stale extension health checks.
+type StaleExtensionHealthChecks struct {
+ // Enabled specifies whether the check for stale extensions health checks is enabled.
+ // Defaults to true.
+ Enabled bool
+ // Threshold configures the duration after which the gardenlet considers a health check report of an extension CRD as outdated.
+ // The threshold should have some leeway in case a Gardener extension is temporarily unavailable.
+ // Defaults to 5m.
+ Threshold *metav1.Duration
+}
+
+// ShootedSeedRegistrationControllerConfiguration defines the configuration of the shooted seed registration controller.
+type ShootedSeedRegistrationControllerConfiguration struct {
+ // SyncJitterPeriod is a jitter duration for the reconciler sync that can be used to distribute the syncs randomly.
+ // If its value is greater than 0 then the shooted seeds will not be enqueued immediately but only after a random
+ // duration between 0 and the configured value. It is defaulted to 5m.
+ SyncJitterPeriod *metav1.Duration
+}
+
+// ConditionThreshold defines how long a flapping condition may stay in the progressing state.
+type ConditionThreshold struct {
+ // Type is the type of the condition to define the threshold for.
+ Type string
+ // Duration is the duration how long the condition can stay in the progressing state.
+ Duration *metav1.Duration
+}
+
+// ShootStateSyncControllerConfiguration defines the configuration of the
+// ShootStateController controller.
+type ShootStateSyncControllerConfiguration struct {
+ // ConcurrentSyncs is the number of workers used for the controller to work on
+ // events.
+ ConcurrentSyncs *int
+ // SyncPeriod is the duration how often the existing extension resources are
+ // synced to the ShootState resource.
+ SyncPeriod *metav1.Duration
+}
+
+// SeedAPIServerNetworkPolicyControllerConfiguration defines the configuration of the SeedAPIServerNetworkPolicy
+// controller.
+type SeedAPIServerNetworkPolicyControllerConfiguration struct {
+ // ConcurrentSyncs is the number of workers used for the controller to work on events.
+ ConcurrentSyncs *int
+}
+
+// ResourcesConfiguration defines the total capacity for seed resources and the amount reserved for use by Gardener.
+type ResourcesConfiguration struct {
+ // Capacity defines the total resources of a seed.
+ Capacity corev1.ResourceList
+ // Reserved defines the resources of a seed that are reserved for use by Gardener.
+ // Defaults to 0.
+ Reserved corev1.ResourceList
+}
+
+// LeaderElectionConfiguration defines the configuration of leader election
+// clients for components that can run with leader election enabled.
+type LeaderElectionConfiguration struct {
+ componentbaseconfig.LeaderElectionConfiguration
+ // LockObjectNamespace defines the namespace of the lock object.
+ LockObjectNamespace *string
+ // LockObjectName defines the lock object name.
+ LockObjectName *string
+}
+
+// SeedConfig contains configuration for the seed cluster.
+type SeedConfig struct {
+ gardencorev1beta1.Seed
+}
+
+// FluentBit contains configuration for Fluent Bit.
+type FluentBit struct {
+ // ServiceSection defines [SERVICE] configuration for the fluent-bit.
+ // If it is nil, fluent-bit uses default service configuration.
+ ServiceSection *string
+ // InputSection defines [INPUT] configuration for the fluent-bit.
+ // If it is nil, fluent-bit uses default input configuration.
+ InputSection *string
+ // OutputSection defines [OUTPUT] configuration for the fluent-bit.
+ // If it is nil, fluent-bit uses default output configuration.
+ OutputSection *string
+}
+
+// Logging contains configuration for the logging stack.
+type Logging struct {
+ // FluentBit contains the configuration for fluent-bit.
+ FluentBit *FluentBit
+}
+
+// ServerConfiguration contains details for the HTTP(S) servers.
+type ServerConfiguration struct {
+ // HTTPS is the configuration for the HTTPS server.
+ HTTPS HTTPSServer
+}
+
+// Server contains information for HTTP(S) server configuration.
+type Server struct {
+ // BindAddress is the IP address on which to listen for the specified port.
+ BindAddress string
+ // Port is the port on which to serve unsecured, unauthenticated access.
+ Port int
+}
+
+// HTTPSServer is the configuration for the HTTPS server.
+type HTTPSServer struct {
+ // Server is the configuration for the bind address and the port.
+ Server
+ // TLS contains information about the TLS configuration for an HTTPS server. If empty, a server
+ // certificate will be self-generated during startup.
+ TLS *TLSServer
+}
+
+// TLSServer contains information about the TLS configuration for an HTTPS server.
+type TLSServer struct {
+ // ServerCertPath is the path to the server certificate file.
+ ServerCertPath string
+ // ServerKeyPath is the path to the private key file.
+ ServerKeyPath string
+}
+
+// SNI contains an optional configuration for the APIServerSNI feature used
+// by the Gardenlet in the seed clusters.
+type SNI struct {
+ // Ingress is the ingressgateway configuration.
+ Ingress *SNIIngress
+}
+
+// SNIIngress contains configuration of the ingressgateway.
+type SNIIngress struct {
+ // ServiceName is the name of the ingressgateway Service.
+ // Defaults to "istio-ingressgateway".
+ ServiceName *string
+ // Namespace is the namespace in which the ingressgateway is deployed.
+ // Defaults to "istio-ingress".
+ Namespace *string
+ // Labels of the ingressgateway
+ // Defaults to "istio: ingressgateway".
+ Labels map[string]string
+}
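As a hedged sketch (not part of the vendored file), a minimal in-memory GardenletConfiguration might look as follows; note that SeedConfig and SeedSelector are mutually exclusive, so only SeedConfig is set here, and all field values are illustrative assumptions.

package example

import (
	"time"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	"github.com/gardener/gardener/pkg/gardenlet/apis/config"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleConfig builds a minimal gardenlet configuration for a single seed.
func exampleConfig() *config.GardenletConfiguration {
	logLevel := "info"
	concurrentSyncs := 20

	return &config.GardenletConfiguration{
		LogLevel: &logLevel,
		Controllers: &config.GardenletControllerConfiguration{
			Shoot: &config.ShootControllerConfiguration{
				ConcurrentSyncs: &concurrentSyncs,
				SyncPeriod:      &metav1.Duration{Duration: time.Hour},
			},
		},
		SeedConfig: &config.SeedConfig{
			Seed: gardencorev1beta1.Seed{
				ObjectMeta: metav1.ObjectMeta{Name: "my-seed"},
			},
		},
	}
}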
diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/zz_generated.deepcopy.go
new file mode 100644
index 0000000..32928e0
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/zz_generated.deepcopy.go
@@ -0,0 +1,858 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package config
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ klog "k8s.io/klog"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucketControllerConfiguration) DeepCopyInto(out *BackupBucketControllerConfiguration) {
+ *out = *in
+ if in.ConcurrentSyncs != nil {
+ in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs
+ *out = new(int)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketControllerConfiguration.
+func (in *BackupBucketControllerConfiguration) DeepCopy() *BackupBucketControllerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupBucketControllerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupEntryControllerConfiguration) DeepCopyInto(out *BackupEntryControllerConfiguration) {
+ *out = *in
+ if in.ConcurrentSyncs != nil {
+ in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs
+ *out = new(int)
+ **out = **in
+ }
+ if in.DeletionGracePeriodHours != nil {
+ in, out := &in.DeletionGracePeriodHours, &out.DeletionGracePeriodHours
+ *out = new(int)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryControllerConfiguration.
+func (in *BackupEntryControllerConfiguration) DeepCopy() *BackupEntryControllerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupEntryControllerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionThreshold) DeepCopyInto(out *ConditionThreshold) {
+ *out = *in
+ if in.Duration != nil {
+ in, out := &in.Duration, &out.Duration
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionThreshold.
+func (in *ConditionThreshold) DeepCopy() *ConditionThreshold {
+ if in == nil {
+ return nil
+ }
+ out := new(ConditionThreshold)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallationCareControllerConfiguration) DeepCopyInto(out *ControllerInstallationCareControllerConfiguration) {
+ *out = *in
+ if in.ConcurrentSyncs != nil {
+ in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs
+ *out = new(int)
+ **out = **in
+ }
+ if in.SyncPeriod != nil {
+ in, out := &in.SyncPeriod, &out.SyncPeriod
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationCareControllerConfiguration.
+func (in *ControllerInstallationCareControllerConfiguration) DeepCopy() *ControllerInstallationCareControllerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallationCareControllerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallationControllerConfiguration) DeepCopyInto(out *ControllerInstallationControllerConfiguration) {
+ *out = *in
+ if in.ConcurrentSyncs != nil {
+ in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs
+ *out = new(int)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationControllerConfiguration.
+func (in *ControllerInstallationControllerConfiguration) DeepCopy() *ControllerInstallationControllerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallationControllerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerInstallationRequiredControllerConfiguration) DeepCopyInto(out *ControllerInstallationRequiredControllerConfiguration) {
+ *out = *in
+ if in.ConcurrentSyncs != nil {
+ in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs
+ *out = new(int)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationRequiredControllerConfiguration.
+func (in *ControllerInstallationRequiredControllerConfiguration) DeepCopy() *ControllerInstallationRequiredControllerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerInstallationRequiredControllerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FluentBit) DeepCopyInto(out *FluentBit) {
+ *out = *in
+ if in.ServiceSection != nil {
+ in, out := &in.ServiceSection, &out.ServiceSection
+ *out = new(string)
+ **out = **in
+ }
+ if in.InputSection != nil {
+ in, out := &in.InputSection, &out.InputSection
+ *out = new(string)
+ **out = **in
+ }
+ if in.OutputSection != nil {
+ in, out := &in.OutputSection, &out.OutputSection
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentBit.
+func (in *FluentBit) DeepCopy() *FluentBit {
+ if in == nil {
+ return nil
+ }
+ out := new(FluentBit)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GardenClientConnection) DeepCopyInto(out *GardenClientConnection) {
+ *out = *in
+ out.ClientConnectionConfiguration = in.ClientConnectionConfiguration
+ if in.GardenClusterAddress != nil {
+ in, out := &in.GardenClusterAddress, &out.GardenClusterAddress
+ *out = new(string)
+ **out = **in
+ }
+ if in.GardenClusterCACert != nil {
+ in, out := &in.GardenClusterCACert, &out.GardenClusterCACert
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ if in.BootstrapKubeconfig != nil {
+ in, out := &in.BootstrapKubeconfig, &out.BootstrapKubeconfig
+ *out = new(corev1.SecretReference)
+ **out = **in
+ }
+ if in.KubeconfigSecret != nil {
+ in, out := &in.KubeconfigSecret, &out.KubeconfigSecret
+ *out = new(corev1.SecretReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenClientConnection.
+func (in *GardenClientConnection) DeepCopy() *GardenClientConnection {
+ if in == nil {
+ return nil
+ }
+ out := new(GardenClientConnection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GardenletConfiguration) DeepCopyInto(out *GardenletConfiguration) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.GardenClientConnection != nil {
+ in, out := &in.GardenClientConnection, &out.GardenClientConnection
+ *out = new(GardenClientConnection)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SeedClientConnection != nil {
+ in, out := &in.SeedClientConnection, &out.SeedClientConnection
+ *out = new(SeedClientConnection)
+ **out = **in
+ }
+ if in.ShootClientConnection != nil {
+ in, out := &in.ShootClientConnection, &out.ShootClientConnection
+ *out = new(ShootClientConnection)
+ **out = **in
+ }
+ if in.Controllers != nil {
+ in, out := &in.Controllers, &out.Controllers
+ *out = new(GardenletControllerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = new(ResourcesConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LeaderElection != nil {
+ in, out := &in.LeaderElection, &out.LeaderElection
+ *out = new(LeaderElectionConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LogLevel != nil {
+ in, out := &in.LogLevel, &out.LogLevel
+ *out = new(string)
+ **out = **in
+ }
+ if in.KubernetesLogLevel != nil {
+ in, out := &in.KubernetesLogLevel, &out.KubernetesLogLevel
+ *out = new(klog.Level)
+ **out = **in
+ }
+ if in.Server != nil {
+ in, out := &in.Server, &out.Server
+ *out = new(ServerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FeatureGates != nil {
+ in, out := &in.FeatureGates, &out.FeatureGates
+ *out = make(map[string]bool, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.SeedConfig != nil {
+ in, out := &in.SeedConfig, &out.SeedConfig
+ *out = new(SeedConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SeedSelector != nil {
+ in, out := &in.SeedSelector, &out.SeedSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Logging != nil {
+ in, out := &in.Logging, &out.Logging
+ *out = new(Logging)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SNI != nil {
+ in, out := &in.SNI, &out.SNI
+ *out = new(SNI)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenletConfiguration.
+func (in *GardenletConfiguration) DeepCopy() *GardenletConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(GardenletConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GardenletConfiguration) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GardenletControllerConfiguration) DeepCopyInto(out *GardenletControllerConfiguration) {
+ *out = *in
+ if in.BackupBucket != nil {
+ in, out := &in.BackupBucket, &out.BackupBucket
+ *out = new(BackupBucketControllerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.BackupEntry != nil {
+ in, out := &in.BackupEntry, &out.BackupEntry
+ *out = new(BackupEntryControllerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ControllerInstallation != nil {
+ in, out := &in.ControllerInstallation, &out.ControllerInstallation
+ *out = new(ControllerInstallationControllerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ControllerInstallationCare != nil {
+ in, out := &in.ControllerInstallationCare, &out.ControllerInstallationCare
+ *out = new(ControllerInstallationCareControllerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ControllerInstallationRequired != nil {
+ in, out := &in.ControllerInstallationRequired, &out.ControllerInstallationRequired
+ *out = new(ControllerInstallationRequiredControllerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Seed != nil {
+ in, out := &in.Seed, &out.Seed
+ *out = new(SeedControllerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Shoot != nil {
+ in, out := &in.Shoot, &out.Shoot
+ *out = new(ShootControllerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ShootCare != nil {
+ in, out := &in.ShootCare, &out.ShootCare
+ *out = new(ShootCareControllerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ShootStateSync != nil {
+ in, out := &in.ShootStateSync, &out.ShootStateSync
+ *out = new(ShootStateSyncControllerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ShootedSeedRegistration != nil {
+ in, out := &in.ShootedSeedRegistration, &out.ShootedSeedRegistration
+ *out = new(ShootedSeedRegistrationControllerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SeedAPIServerNetworkPolicy != nil {
+ in, out := &in.SeedAPIServerNetworkPolicy, &out.SeedAPIServerNetworkPolicy
+ *out = new(SeedAPIServerNetworkPolicyControllerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenletControllerConfiguration.
+func (in *GardenletControllerConfiguration) DeepCopy() *GardenletControllerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(GardenletControllerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPSServer) DeepCopyInto(out *HTTPSServer) {
+ *out = *in
+ out.Server = in.Server
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = new(TLSServer)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSServer.
+func (in *HTTPSServer) DeepCopy() *HTTPSServer {
+ if in == nil {
+ return nil
+ }
+ out := new(HTTPSServer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) {
+ *out = *in
+ out.LeaderElectionConfiguration = in.LeaderElectionConfiguration
+ if in.LockObjectNamespace != nil {
+ in, out := &in.LockObjectNamespace, &out.LockObjectNamespace
+ *out = new(string)
+ **out = **in
+ }
+ if in.LockObjectName != nil {
+ in, out := &in.LockObjectName, &out.LockObjectName
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElectionConfiguration.
+func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(LeaderElectionConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Logging) DeepCopyInto(out *Logging) {
+ *out = *in
+ if in.FluentBit != nil {
+ in, out := &in.FluentBit, &out.FluentBit
+ *out = new(FluentBit)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logging.
+func (in *Logging) DeepCopy() *Logging {
+ if in == nil {
+ return nil
+ }
+ out := new(Logging)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourcesConfiguration) DeepCopyInto(out *ResourcesConfiguration) {
+ *out = *in
+ if in.Capacity != nil {
+ in, out := &in.Capacity, &out.Capacity
+ *out = make(corev1.ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+ if in.Reserved != nil {
+ in, out := &in.Reserved, &out.Reserved
+ *out = make(corev1.ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesConfiguration.
+func (in *ResourcesConfiguration) DeepCopy() *ResourcesConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourcesConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SNI) DeepCopyInto(out *SNI) {
+ *out = *in
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = new(SNIIngress)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SNI.
+func (in *SNI) DeepCopy() *SNI {
+ if in == nil {
+ return nil
+ }
+ out := new(SNI)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SNIIngress) DeepCopyInto(out *SNIIngress) {
+ *out = *in
+ if in.ServiceName != nil {
+ in, out := &in.ServiceName, &out.ServiceName
+ *out = new(string)
+ **out = **in
+ }
+ if in.Namespace != nil {
+ in, out := &in.Namespace, &out.Namespace
+ *out = new(string)
+ **out = **in
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SNIIngress.
+func (in *SNIIngress) DeepCopy() *SNIIngress {
+ if in == nil {
+ return nil
+ }
+ out := new(SNIIngress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedAPIServerNetworkPolicyControllerConfiguration) DeepCopyInto(out *SeedAPIServerNetworkPolicyControllerConfiguration) {
+ *out = *in
+ if in.ConcurrentSyncs != nil {
+ in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs
+ *out = new(int)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedAPIServerNetworkPolicyControllerConfiguration.
+func (in *SeedAPIServerNetworkPolicyControllerConfiguration) DeepCopy() *SeedAPIServerNetworkPolicyControllerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedAPIServerNetworkPolicyControllerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedClientConnection) DeepCopyInto(out *SeedClientConnection) {
+ *out = *in
+ out.ClientConnectionConfiguration = in.ClientConnectionConfiguration
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedClientConnection.
+func (in *SeedClientConnection) DeepCopy() *SeedClientConnection {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedClientConnection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedConfig) DeepCopyInto(out *SeedConfig) {
+ *out = *in
+ in.Seed.DeepCopyInto(&out.Seed)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedConfig.
+func (in *SeedConfig) DeepCopy() *SeedConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeedControllerConfiguration) DeepCopyInto(out *SeedControllerConfiguration) {
+ *out = *in
+ if in.ConcurrentSyncs != nil {
+ in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs
+ *out = new(int)
+ **out = **in
+ }
+ if in.SyncPeriod != nil {
+ in, out := &in.SyncPeriod, &out.SyncPeriod
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedControllerConfiguration.
+func (in *SeedControllerConfiguration) DeepCopy() *SeedControllerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(SeedControllerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Server) DeepCopyInto(out *Server) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server.
+func (in *Server) DeepCopy() *Server {
+ if in == nil {
+ return nil
+ }
+ out := new(Server)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServerConfiguration) DeepCopyInto(out *ServerConfiguration) {
+ *out = *in
+ in.HTTPS.DeepCopyInto(&out.HTTPS)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerConfiguration.
+func (in *ServerConfiguration) DeepCopy() *ServerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ServerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootCareControllerConfiguration) DeepCopyInto(out *ShootCareControllerConfiguration) {
+ *out = *in
+ if in.ConcurrentSyncs != nil {
+ in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs
+ *out = new(int)
+ **out = **in
+ }
+ if in.SyncPeriod != nil {
+ in, out := &in.SyncPeriod, &out.SyncPeriod
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ if in.StaleExtensionHealthChecks != nil {
+ in, out := &in.StaleExtensionHealthChecks, &out.StaleExtensionHealthChecks
+ *out = new(StaleExtensionHealthChecks)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ConditionThresholds != nil {
+ in, out := &in.ConditionThresholds, &out.ConditionThresholds
+ *out = make([]ConditionThreshold, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootCareControllerConfiguration.
+func (in *ShootCareControllerConfiguration) DeepCopy() *ShootCareControllerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootCareControllerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootClientConnection) DeepCopyInto(out *ShootClientConnection) {
+ *out = *in
+ out.ClientConnectionConfiguration = in.ClientConnectionConfiguration
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootClientConnection.
+func (in *ShootClientConnection) DeepCopy() *ShootClientConnection {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootClientConnection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootControllerConfiguration) DeepCopyInto(out *ShootControllerConfiguration) {
+ *out = *in
+ if in.ConcurrentSyncs != nil {
+ in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs
+ *out = new(int)
+ **out = **in
+ }
+ if in.ProgressReportPeriod != nil {
+ in, out := &in.ProgressReportPeriod, &out.ProgressReportPeriod
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ if in.ReconcileInMaintenanceOnly != nil {
+ in, out := &in.ReconcileInMaintenanceOnly, &out.ReconcileInMaintenanceOnly
+ *out = new(bool)
+ **out = **in
+ }
+ if in.RespectSyncPeriodOverwrite != nil {
+ in, out := &in.RespectSyncPeriodOverwrite, &out.RespectSyncPeriodOverwrite
+ *out = new(bool)
+ **out = **in
+ }
+ if in.RetryDuration != nil {
+ in, out := &in.RetryDuration, &out.RetryDuration
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ if in.SyncPeriod != nil {
+ in, out := &in.SyncPeriod, &out.SyncPeriod
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ if in.DNSEntryTTLSeconds != nil {
+ in, out := &in.DNSEntryTTLSeconds, &out.DNSEntryTTLSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootControllerConfiguration.
+func (in *ShootControllerConfiguration) DeepCopy() *ShootControllerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootControllerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootStateSyncControllerConfiguration) DeepCopyInto(out *ShootStateSyncControllerConfiguration) {
+ *out = *in
+ if in.ConcurrentSyncs != nil {
+ in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs
+ *out = new(int)
+ **out = **in
+ }
+ if in.SyncPeriod != nil {
+ in, out := &in.SyncPeriod, &out.SyncPeriod
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStateSyncControllerConfiguration.
+func (in *ShootStateSyncControllerConfiguration) DeepCopy() *ShootStateSyncControllerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootStateSyncControllerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShootedSeedRegistrationControllerConfiguration) DeepCopyInto(out *ShootedSeedRegistrationControllerConfiguration) {
+ *out = *in
+ if in.SyncJitterPeriod != nil {
+ in, out := &in.SyncJitterPeriod, &out.SyncJitterPeriod
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootedSeedRegistrationControllerConfiguration.
+func (in *ShootedSeedRegistrationControllerConfiguration) DeepCopy() *ShootedSeedRegistrationControllerConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ShootedSeedRegistrationControllerConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StaleExtensionHealthChecks) DeepCopyInto(out *StaleExtensionHealthChecks) {
+ *out = *in
+ if in.Threshold != nil {
+ in, out := &in.Threshold, &out.Threshold
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaleExtensionHealthChecks.
+func (in *StaleExtensionHealthChecks) DeepCopy() *StaleExtensionHealthChecks {
+ if in == nil {
+ return nil
+ }
+ out := new(StaleExtensionHealthChecks)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSServer) DeepCopyInto(out *TLSServer) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSServer.
+func (in *TLSServer) DeepCopy() *TLSServer {
+ if in == nil {
+ return nil
+ }
+ out := new(TLSServer)
+ in.DeepCopyInto(out)
+ return out
+}
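
For orientation, a minimal sketch of how such generated deepcopy helpers are typically consumed (the caller below is hypothetical and assumes it lives in the same package as the generated types):

```go
// copyAndTune is a hypothetical helper: DeepCopy returns a fully independent
// copy, so mutating the copy's pointer fields leaves the original untouched.
func copyAndTune(orig *ShootCareControllerConfiguration) *ShootCareControllerConfiguration {
	cfg := orig.DeepCopy()
	if cfg.ConcurrentSyncs != nil {
		*cfg.ConcurrentSyncs = 10 // orig.ConcurrentSyncs keeps its old value
	}
	return cfg
}
```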
diff --git a/vendor/github.com/gardener/gardener/pkg/logger/logger.go b/vendor/github.com/gardener/gardener/pkg/logger/logger.go
new file mode 100644
index 0000000..0dd8d05
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/logger/logger.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logger
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+
+ "github.com/gardener/gardener/pkg/utils"
+
+ "github.com/sirupsen/logrus"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// Logger is the standard logger for the Gardener which is used for all messages which are not Shoot
+// cluster specific.
+var Logger *logrus.Logger
+
+// NewLogger creates a new logrus logger.
+// It uses STDERR as output channel and evaluates the value of the --log-level command line argument in order
+// to set the log level.
+// Example output: time="2017-06-08T13:00:28+02:00" level=info msg="gardener started successfully".
+func NewLogger(logLevel string) *logrus.Logger {
+ var level logrus.Level
+
+ switch logLevel {
+ case "debug":
+ level = logrus.DebugLevel
+ case "", "info":
+ level = logrus.InfoLevel
+ case "error":
+ level = logrus.ErrorLevel
+ default:
+ panic("The specified log level is not supported.")
+ }
+
+ logger := &logrus.Logger{
+ Out: os.Stderr,
+ Level: level,
+ Formatter: &logrus.TextFormatter{
+ DisableColors: true,
+ },
+ }
+ Logger = logger
+ return logger
+}
+
+// NewNopLogger instantiates a new logger that logs to ioutil.Discard.
+func NewNopLogger() *logrus.Logger {
+ logger := logrus.New()
+ logger.Out = ioutil.Discard
+ return logger
+}
+
+// AddWriter returns a logger that uses the given writer (e.g., Ginkgo's GinkgoWriter in tests) as its output channel.
+func AddWriter(logger *logrus.Logger, writer io.Writer) *logrus.Logger {
+ logger.Out = writer
+ return logger
+}
+
+// NewShootLogger extends an existing logrus logger and adds an additional field containing the Shoot cluster name
+// and the project in the Garden cluster to the output. This field is printed for every
+// log message.
+// Example output: time="2017-06-08T13:00:49+02:00" level=info msg="Creating namespace in seed cluster" shoot=core/crazy-botany.
+func NewShootLogger(logger *logrus.Logger, shoot, project string) *logrus.Entry {
+ return logger.WithField("shoot", fmt.Sprintf("%s/%s", project, shoot))
+}
+
+// NewFieldLogger extends an existing logrus logger and adds the provided additional field.
+// Example output: time="2017-06-08T13:00:49+02:00" level=info msg="something" fieldKey=fieldValue.
+func NewFieldLogger(logger logrus.FieldLogger, fieldKey, fieldValue string) *logrus.Entry {
+ return logger.WithField(fieldKey, fieldValue)
+}
+
+// IDFieldName is the name of the id field for a logger.
+const IDFieldName = "process_id"
+
+// NewIDLogger extends an existing logrus logger with a randomly generated id field.
+// Example output: time="2017-06-08T13:00:49+02:00" level=info msg="something" id=123abcde.
+func NewIDLogger(logger logrus.FieldLogger) logrus.FieldLogger {
+ id, err := utils.GenerateRandomString(8)
+ utilruntime.Must(err)
+ return NewFieldLogger(logger, IDFieldName, id)
+}
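
A minimal usage sketch for the logger package added above (the main function is illustrative only; log level and shoot/project names are made up):

```go
package main

import (
	"github.com/gardener/gardener/pkg/logger"
)

func main() {
	// NewLogger writes to STDERR; "" and "info" map to InfoLevel, unknown levels panic.
	log := logger.NewLogger("info")
	log.Info("gardener started successfully")

	// NewShootLogger derives an entry that carries shoot=<project>/<shoot> on every message.
	shootLog := logger.NewShootLogger(log, "crazy-botany", "core")
	shootLog.Info("Creating namespace in seed cluster")
}
```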
diff --git a/vendor/github.com/gardener/gardener/pkg/mock/go/context/doc.go b/vendor/github.com/gardener/gardener/pkg/mock/go/context/doc.go
new file mode 100644
index 0000000..d6d46c4
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/mock/go/context/doc.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate mockgen -package=context -destination=funcs.go github.com/gardener/gardener/pkg/mock/go/context WithTimeout,CancelFunc
+//go:generate mockgen -package=context -destination=mocks.go github.com/gardener/gardener/pkg/mock/go/context Context
+
+package context
+
+import (
+ "context"
+ "time"
+)
+
+// Context allows mocking context.Context. The interface is necessary due to an issue with
+// golang/mock not being able to generate code for go's core context package.
+type Context interface {
+ context.Context
+}
+
+// WithTimeout is an interface that allows mocking `WithTimeout`.
+type WithTimeout interface {
+ Do(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc)
+}
+
+// CancelFunc is an interface that allows mocking `CancelFunc`.
+type CancelFunc interface {
+ Do()
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/mock/go/context/funcs.go b/vendor/github.com/gardener/gardener/pkg/mock/go/context/funcs.go
new file mode 100644
index 0000000..7fa12c4
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/mock/go/context/funcs.go
@@ -0,0 +1,86 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/gardener/gardener/pkg/mock/go/context (interfaces: WithTimeout,CancelFunc)
+
+// Package context is a generated GoMock package.
+package context
+
+import (
+ context "context"
+ reflect "reflect"
+ time "time"
+
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockWithTimeout is a mock of WithTimeout interface.
+type MockWithTimeout struct {
+ ctrl *gomock.Controller
+ recorder *MockWithTimeoutMockRecorder
+}
+
+// MockWithTimeoutMockRecorder is the mock recorder for MockWithTimeout.
+type MockWithTimeoutMockRecorder struct {
+ mock *MockWithTimeout
+}
+
+// NewMockWithTimeout creates a new mock instance.
+func NewMockWithTimeout(ctrl *gomock.Controller) *MockWithTimeout {
+ mock := &MockWithTimeout{ctrl: ctrl}
+ mock.recorder = &MockWithTimeoutMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockWithTimeout) EXPECT() *MockWithTimeoutMockRecorder {
+ return m.recorder
+}
+
+// Do mocks base method.
+func (m *MockWithTimeout) Do(arg0 context.Context, arg1 time.Duration) (context.Context, context.CancelFunc) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Do", arg0, arg1)
+ ret0, _ := ret[0].(context.Context)
+ ret1, _ := ret[1].(context.CancelFunc)
+ return ret0, ret1
+}
+
+// Do indicates an expected call of Do.
+func (mr *MockWithTimeoutMockRecorder) Do(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockWithTimeout)(nil).Do), arg0, arg1)
+}
+
+// MockCancelFunc is a mock of CancelFunc interface.
+type MockCancelFunc struct {
+ ctrl *gomock.Controller
+ recorder *MockCancelFuncMockRecorder
+}
+
+// MockCancelFuncMockRecorder is the mock recorder for MockCancelFunc.
+type MockCancelFuncMockRecorder struct {
+ mock *MockCancelFunc
+}
+
+// NewMockCancelFunc creates a new mock instance.
+func NewMockCancelFunc(ctrl *gomock.Controller) *MockCancelFunc {
+ mock := &MockCancelFunc{ctrl: ctrl}
+ mock.recorder = &MockCancelFuncMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockCancelFunc) EXPECT() *MockCancelFuncMockRecorder {
+ return m.recorder
+}
+
+// Do mocks base method.
+func (m *MockCancelFunc) Do() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Do")
+}
+
+// Do indicates an expected call of Do.
+func (mr *MockCancelFuncMockRecorder) Do() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockCancelFunc)(nil).Do))
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/mock/go/context/mocks.go b/vendor/github.com/gardener/gardener/pkg/mock/go/context/mocks.go
new file mode 100644
index 0000000..f3d53dc
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/mock/go/context/mocks.go
@@ -0,0 +1,92 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/gardener/gardener/pkg/mock/go/context (interfaces: Context)
+
+// Package context is a generated GoMock package.
+package context
+
+import (
+ reflect "reflect"
+ time "time"
+
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockContext is a mock of Context interface.
+type MockContext struct {
+ ctrl *gomock.Controller
+ recorder *MockContextMockRecorder
+}
+
+// MockContextMockRecorder is the mock recorder for MockContext.
+type MockContextMockRecorder struct {
+ mock *MockContext
+}
+
+// NewMockContext creates a new mock instance.
+func NewMockContext(ctrl *gomock.Controller) *MockContext {
+ mock := &MockContext{ctrl: ctrl}
+ mock.recorder = &MockContextMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockContext) EXPECT() *MockContextMockRecorder {
+ return m.recorder
+}
+
+// Deadline mocks base method.
+func (m *MockContext) Deadline() (time.Time, bool) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Deadline")
+ ret0, _ := ret[0].(time.Time)
+ ret1, _ := ret[1].(bool)
+ return ret0, ret1
+}
+
+// Deadline indicates an expected call of Deadline.
+func (mr *MockContextMockRecorder) Deadline() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Deadline", reflect.TypeOf((*MockContext)(nil).Deadline))
+}
+
+// Done mocks base method.
+func (m *MockContext) Done() <-chan struct{} {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Done")
+ ret0, _ := ret[0].(<-chan struct{})
+ return ret0
+}
+
+// Done indicates an expected call of Done.
+func (mr *MockContextMockRecorder) Done() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Done", reflect.TypeOf((*MockContext)(nil).Done))
+}
+
+// Err mocks base method.
+func (m *MockContext) Err() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Err")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Err indicates an expected call of Err.
+func (mr *MockContextMockRecorder) Err() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockContext)(nil).Err))
+}
+
+// Value mocks base method.
+func (m *MockContext) Value(arg0 interface{}) interface{} {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Value", arg0)
+ ret0, _ := ret[0].(interface{})
+ return ret0
+}
+
+// Value indicates an expected call of Value.
+func (mr *MockContextMockRecorder) Value(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Value", reflect.TypeOf((*MockContext)(nil).Value), arg0)
+}
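
A short test sketch showing how the generated MockContext above is typically wired up with gomock (test name and expectations are illustrative):

```go
package context_test

import (
	"testing"

	"github.com/golang/mock/gomock"

	mockcontext "github.com/gardener/gardener/pkg/mock/go/context"
)

func TestContextNotCancelled(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	ctx := mockcontext.NewMockContext(ctrl)
	// Expect exactly one Err() call and report the context as not cancelled.
	ctx.EXPECT().Err().Return(nil)

	if err := ctx.Err(); err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
}
```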
diff --git a/vendor/github.com/gardener/gardener/pkg/operation/common/extensions.go b/vendor/github.com/gardener/gardener/pkg/operation/common/extensions.go
new file mode 100644
index 0000000..573a427
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/operation/common/extensions.go
@@ -0,0 +1,568 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/gardener/gardener/pkg/api/extensions"
+ gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ gardencorev1alpha1helper "github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper"
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ gardencorev1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+ "github.com/gardener/gardener/pkg/utils"
+ "github.com/gardener/gardener/pkg/utils/flow"
+ kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
+ "github.com/gardener/gardener/pkg/utils/kubernetes/health"
+ "github.com/gardener/gardener/pkg/utils/retry"
+
+ "github.com/sirupsen/logrus"
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+)
+
+// SyncClusterResourceToSeed creates or updates the `extensions.gardener.cloud/v1alpha1.Cluster` resource in the seed
+// cluster by adding the shoot, seed, and cloudprofile specification.
+func SyncClusterResourceToSeed(ctx context.Context, client client.Client, clusterName string, shoot *gardencorev1beta1.Shoot, cloudProfile *gardencorev1beta1.CloudProfile, seed *gardencorev1beta1.Seed) error {
+ if shoot.Spec.SeedName == nil {
+ return nil
+ }
+
+ var (
+ cluster = &extensionsv1alpha1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: clusterName,
+ },
+ }
+
+ cloudProfileObj *gardencorev1beta1.CloudProfile
+ seedObj *gardencorev1beta1.Seed
+ shootObj *gardencorev1beta1.Shoot
+ )
+
+ if cloudProfile != nil {
+ cloudProfileObj = cloudProfile.DeepCopy()
+ cloudProfileObj.TypeMeta = metav1.TypeMeta{
+ APIVersion: gardencorev1beta1.SchemeGroupVersion.String(),
+ Kind: "CloudProfile",
+ }
+ }
+
+ if seed != nil {
+ seedObj = seed.DeepCopy()
+ seedObj.TypeMeta = metav1.TypeMeta{
+ APIVersion: gardencorev1beta1.SchemeGroupVersion.String(),
+ Kind: "Seed",
+ }
+ }
+
+ if shoot != nil {
+ shootObj = shoot.DeepCopy()
+ shootObj.TypeMeta = metav1.TypeMeta{
+ APIVersion: gardencorev1beta1.SchemeGroupVersion.String(),
+ Kind: "Shoot",
+ }
+ }
+
+ _, err := controllerutil.CreateOrUpdate(ctx, client, cluster, func() error {
+ if cloudProfileObj != nil {
+ cluster.Spec.CloudProfile = runtime.RawExtension{Object: cloudProfileObj}
+ }
+ if seedObj != nil {
+ cluster.Spec.Seed = runtime.RawExtension{Object: seedObj}
+ }
+ if shootObj != nil {
+ cluster.Spec.Shoot = runtime.RawExtension{Object: shootObj}
+ }
+ return nil
+ })
+ return err
+}
+
+// WaitUntilExtensionCRReady waits until the given extension resource has become ready.
+func WaitUntilExtensionCRReady(
+ ctx context.Context,
+ c client.Client,
+ logger logrus.FieldLogger,
+ newObjFunc func() client.Object,
+ kind string,
+ namespace string,
+ name string,
+ interval time.Duration,
+ severeThreshold time.Duration,
+ timeout time.Duration,
+ postReadyFunc func(runtime.Object) error,
+) error {
+ return WaitUntilObjectReadyWithHealthFunction(
+ ctx,
+ c,
+ logger,
+ health.CheckExtensionObject,
+ newObjFunc,
+ kind,
+ namespace,
+ name,
+ interval,
+ severeThreshold,
+ timeout,
+ postReadyFunc,
+ )
+}
+
+// WaitUntilObjectReadyWithHealthFunction waits until the given resource has become ready. It takes the health check
+// function that should be executed.
+func WaitUntilObjectReadyWithHealthFunction(
+ ctx context.Context,
+ c client.Client,
+ logger logrus.FieldLogger,
+ healthFunc health.Func,
+ newObjFunc func() client.Object,
+ kind string,
+ namespace string,
+ name string,
+ interval time.Duration,
+ severeThreshold time.Duration,
+ timeout time.Duration,
+ postReadyFunc func(runtime.Object) error,
+) error {
+ var (
+ errorWithCode *gardencorev1beta1helper.ErrorWithCodes
+ lastObservedError error
+ retryCountUntilSevere int
+ )
+
+ if err := retry.UntilTimeout(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+ retryCountUntilSevere++
+
+ obj := newObjFunc()
+ if err := c.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, obj); err != nil {
+ if apierrors.IsNotFound(err) {
+ return retry.MinorError(err)
+ }
+ return retry.SevereError(err)
+ }
+
+ if err := healthFunc(obj); err != nil {
+ lastObservedError = err
+ logger.WithError(err).Errorf("%s did not get ready yet", extensionKey(kind, namespace, name))
+ if errors.As(err, &errorWithCode) {
+ return retry.MinorOrSevereError(retryCountUntilSevere, int(severeThreshold.Nanoseconds()/interval.Nanoseconds()), err)
+ }
+ return retry.MinorError(err)
+ }
+
+ if postReadyFunc != nil {
+ if err := postReadyFunc(obj); err != nil {
+ return retry.SevereError(err)
+ }
+ }
+
+ return retry.Ok()
+ }); err != nil {
+ message := fmt.Sprintf("Error while waiting for %s to become ready", extensionKey(kind, namespace, name))
+ if lastObservedError != nil {
+ return gardencorev1beta1helper.NewErrorWithCodes(formatErrorMessage(message, lastObservedError.Error()), gardencorev1beta1helper.ExtractErrorCodes(lastObservedError)...)
+ }
+ return errors.New(formatErrorMessage(message, err.Error()))
+ }
+
+ return nil
+}
+
+// DeleteExtensionCR deletes an extension resource.
+func DeleteExtensionCR(
+ ctx context.Context,
+ c client.Client,
+ newObjFunc func() extensionsv1alpha1.Object,
+ namespace string,
+ name string,
+ deleteOpts ...client.DeleteOption,
+) error {
+ obj := newObjFunc()
+ obj.SetNamespace(namespace)
+ obj.SetName(name)
+
+ if err := ConfirmDeletion(ctx, c, obj); err != nil {
+ return err
+ }
+
+ return client.IgnoreNotFound(c.Delete(ctx, obj, deleteOpts...))
+}
+
+// DeleteExtensionCRs lists all extension resources and loops over them. It executes the given predicate
+// function for each of them, and if it evaluates to true then the resource will be deleted.
+func DeleteExtensionCRs(
+ ctx context.Context,
+ c client.Client,
+ listObj client.ObjectList,
+ newObjFunc func() extensionsv1alpha1.Object,
+ namespace string,
+ predicateFunc func(obj extensionsv1alpha1.Object) bool,
+ deleteOpts ...client.DeleteOption,
+) error {
+ fns, err := applyFuncToExtensionResources(ctx, c, listObj, namespace, predicateFunc, func(ctx context.Context, obj extensionsv1alpha1.Object) error {
+ return DeleteExtensionCR(
+ ctx,
+ c,
+ newObjFunc,
+ obj.GetNamespace(),
+ obj.GetName(),
+ deleteOpts...,
+ )
+ })
+
+ if err != nil {
+ return err
+ }
+
+ return flow.Parallel(fns...)(ctx)
+}
+
+// WaitUntilExtensionCRsDeleted lists all extension resources and loops over them. It executes the given predicate
+// function for each of them, and if it evaluates to true then it waits for the resource to be deleted.
+func WaitUntilExtensionCRsDeleted(
+ ctx context.Context,
+ c client.Client,
+ logger logrus.FieldLogger,
+ listObj client.ObjectList,
+ newObjFunc func() extensionsv1alpha1.Object,
+ kind string,
+ namespace string,
+ interval time.Duration,
+ timeout time.Duration,
+ predicateFunc func(obj extensionsv1alpha1.Object) bool,
+) error {
+ fns, err := applyFuncToExtensionResources(
+ ctx,
+ c,
+ listObj,
+ namespace,
+ func(obj extensionsv1alpha1.Object) bool {
+ if obj.GetDeletionTimestamp() == nil {
+ return false
+ }
+ if predicateFunc != nil && !predicateFunc(obj) {
+ return false
+ }
+ return true
+ },
+ func(ctx context.Context, obj extensionsv1alpha1.Object) error {
+ return WaitUntilExtensionCRDeleted(
+ ctx,
+ c,
+ logger,
+ newObjFunc,
+ kind,
+ obj.GetNamespace(),
+ obj.GetName(),
+ interval,
+ timeout,
+ )
+ },
+ )
+
+ if err != nil {
+ return err
+ }
+
+ return flow.Parallel(fns...)(ctx)
+}
+
+// WaitUntilExtensionCRDeleted waits until an extension resource is deleted from the system.
+func WaitUntilExtensionCRDeleted(
+ ctx context.Context,
+ c client.Client,
+ logger logrus.FieldLogger,
+ newObjFunc func() extensionsv1alpha1.Object,
+ kind string,
+ namespace string,
+ name string,
+ interval time.Duration,
+ timeout time.Duration,
+) error {
+ var lastObservedError error
+
+ if err := retry.UntilTimeout(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+ obj := newObjFunc()
+ if err := c.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, obj); err != nil {
+ if apierrors.IsNotFound(err) {
+ return retry.Ok()
+ }
+ return retry.SevereError(err)
+ }
+
+ acc, err := extensions.Accessor(obj)
+ if err != nil {
+ return retry.SevereError(err)
+ }
+
+ if lastErr := acc.GetExtensionStatus().GetLastError(); lastErr != nil {
+ logger.Errorf("%s did not get deleted yet, lastError is: %s", extensionKey(kind, namespace, name), lastErr.Description)
+ lastObservedError = gardencorev1beta1helper.NewErrorWithCodes(lastErr.Description, lastErr.Codes...)
+ }
+
+ var message = fmt.Sprintf("%s is still present", extensionKey(kind, namespace, name))
+ if lastObservedError != nil {
+ message += fmt.Sprintf(", last observed error: %s", lastObservedError.Error())
+ }
+ return retry.MinorError(fmt.Errorf(message))
+ }); err != nil {
+ message := fmt.Sprintf("Failed to delete %s", extensionKey(kind, namespace, name))
+ if lastObservedError != nil {
+ return gardencorev1beta1helper.NewErrorWithCodes(formatErrorMessage(message, lastObservedError.Error()), gardencorev1beta1helper.ExtractErrorCodes(lastObservedError)...)
+ }
+ return errors.New(formatErrorMessage(message, err.Error()))
+ }
+
+ return nil
+}
+
+// RestoreExtensionWithDeployFunction deploys the extension resource with the passed in deployFunc and sets its operation annotation to wait-for-state.
+// It then restores the state of the extension resource from the ShootState, creates any required state resources and sets the operation annotation to restore.
+func RestoreExtensionWithDeployFunction(
+ ctx context.Context,
+ shootState *gardencorev1alpha1.ShootState,
+ c client.Client,
+ resourceKind string,
+ namespace string,
+ deployFunc func(ctx context.Context, operationAnnotation string) (extensionsv1alpha1.Object, error),
+) error {
+ extensionObj, err := deployFunc(ctx, v1beta1constants.GardenerOperationWaitForState)
+ if err != nil {
+ return err
+ }
+
+ if err := RestoreExtensionObjectState(ctx, c, shootState, namespace, extensionObj, resourceKind); err != nil {
+ return err
+ }
+
+ return AnnotateExtensionObjectWithOperation(ctx, c, extensionObj, v1beta1constants.GardenerOperationRestore)
+}
+
+// RestoreExtensionObjectState restores the status.state field of the extension resource and deploys any resources referenced by it from the provided ShootState.
+func RestoreExtensionObjectState(
+ ctx context.Context,
+ c client.Client,
+ shootState *gardencorev1alpha1.ShootState,
+ namespace string,
+ extensionObj extensionsv1alpha1.Object,
+ resourceKind string,
+) error {
+ var resourceRefs []autoscalingv1.CrossVersionObjectReference
+ if shootState.Spec.Extensions != nil {
+ resourceName := extensionObj.GetName()
+ purpose := extensionObj.GetExtensionSpec().GetExtensionPurpose()
+ list := gardencorev1alpha1helper.ExtensionResourceStateList(shootState.Spec.Extensions)
+ if extensionResourceState := list.Get(resourceKind, &resourceName, purpose); extensionResourceState != nil {
+ extensionStatus := extensionObj.GetExtensionStatus()
+ extensionStatus.SetState(extensionResourceState.State)
+ extensionStatus.SetResources(extensionResourceState.Resources)
+
+ if err := c.Status().Update(ctx, extensionObj); err != nil {
+ return err
+ }
+
+ for _, r := range extensionResourceState.Resources {
+ resourceRefs = append(resourceRefs, r.ResourceRef)
+ }
+ }
+ }
+ if shootState.Spec.Resources != nil {
+ list := gardencorev1alpha1helper.ResourceDataList(shootState.Spec.Resources)
+ for _, resourceRef := range resourceRefs {
+ resourceData := list.Get(&resourceRef)
+ if resourceData != nil {
+ obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&resourceData.Data)
+ if err != nil {
+ return err
+ }
+ if err := utils.CreateOrUpdateObjectByRef(ctx, c, &resourceRef, namespace, obj); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// MigrateExtensionCR adds the migrate operation annotation to the extension CR.
+func MigrateExtensionCR(
+ ctx context.Context,
+ c client.Client,
+ newObjFunc func() extensionsv1alpha1.Object,
+ namespace string,
+ name string,
+) error {
+ obj := newObjFunc()
+ obj.SetNamespace(namespace)
+ obj.SetName(name)
+
+ if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, obj); err != nil {
+ if client.IgnoreNotFound(err) == nil {
+ return nil
+ }
+ return err
+ }
+
+ return AnnotateExtensionObjectWithOperation(ctx, c, obj, v1beta1constants.GardenerOperationMigrate)
+}
+
+// MigrateExtensionCRs lists all extension resources of a given kind and annotates them with the Migrate operation.
+func MigrateExtensionCRs(
+ ctx context.Context,
+ c client.Client,
+ listObj client.ObjectList,
+ newObjFunc func() extensionsv1alpha1.Object,
+ namespace string,
+) error {
+ fns, err := applyFuncToExtensionResources(ctx, c, listObj, namespace, nil, func(ctx context.Context, o extensionsv1alpha1.Object) error {
+ return MigrateExtensionCR(ctx, c, newObjFunc, o.GetNamespace(), o.GetName())
+ })
+
+ if err != nil {
+ return err
+ }
+
+ return flow.Parallel(fns...)(ctx)
+}
+
+// WaitUntilExtensionCRMigrated waits until the migrate operation for the extension resource is successful.
+func WaitUntilExtensionCRMigrated(
+ ctx context.Context,
+ c client.Client,
+ newObjFunc func() extensionsv1alpha1.Object,
+ namespace string,
+ name string,
+ interval time.Duration,
+ timeout time.Duration,
+) error {
+ obj := newObjFunc()
+ obj.SetNamespace(namespace)
+ obj.SetName(name)
+
+ return retry.UntilTimeout(ctx, interval, timeout, func(ctx context.Context) (done bool, err error) {
+ if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, obj); err != nil {
+ if client.IgnoreNotFound(err) == nil {
+ return retry.Ok()
+ }
+ return retry.SevereError(err)
+ }
+
+ if extensionObjStatus := obj.GetExtensionStatus(); extensionObjStatus != nil {
+ if lastOperation := extensionObjStatus.GetLastOperation(); lastOperation != nil {
+ if lastOperation.Type == gardencorev1beta1.LastOperationTypeMigrate && lastOperation.State == gardencorev1beta1.LastOperationStateSucceeded {
+ return retry.Ok()
+ }
+ }
+ }
+
+ var extensionType string
+ if extensionSpec := obj.GetExtensionSpec(); extensionSpec != nil {
+ extensionType = extensionSpec.GetExtensionType()
+ }
+ return retry.MinorError(fmt.Errorf("lastOperation for extension CR %s with name %s and type %s is not Migrate=Succeeded", obj.GetObjectKind().GroupVersionKind().Kind, name, extensionType))
+ })
+}
+
+// WaitUntilExtensionCRsMigrated lists all extension resources of a given kind and waits until they are migrated
+func WaitUntilExtensionCRsMigrated(
+ ctx context.Context,
+ c client.Client,
+ listObj client.ObjectList,
+ newObjFunc func() extensionsv1alpha1.Object,
+ namespace string,
+ interval time.Duration,
+ timeout time.Duration,
+) error {
+ fns, err := applyFuncToExtensionResources(ctx, c, listObj, namespace, nil, func(ctx context.Context, object extensionsv1alpha1.Object) error {
+ return WaitUntilExtensionCRMigrated(
+ ctx,
+ c,
+ newObjFunc,
+ object.GetNamespace(),
+ object.GetName(),
+ interval,
+ timeout,
+ )
+ })
+
+ if err != nil {
+ return err
+ }
+
+ return flow.Parallel(fns...)(ctx)
+}
+
+// AnnotateExtensionObjectWithOperation annotates the extension resource with the provided operation annotation value.
+func AnnotateExtensionObjectWithOperation(ctx context.Context, c client.Client, extensionObj extensionsv1alpha1.Object, operation string) error {
+ extensionObjCopy := extensionObj.DeepCopyObject()
+ kutil.SetMetaDataAnnotation(extensionObj, v1beta1constants.GardenerOperation, operation)
+ kutil.SetMetaDataAnnotation(extensionObj, v1beta1constants.GardenerTimestamp, TimeNow().UTC().String())
+ return c.Patch(ctx, extensionObj, client.MergeFrom(extensionObjCopy))
+}
+
+func applyFuncToExtensionResources(
+ ctx context.Context,
+ c client.Client,
+ listObj client.ObjectList,
+ namespace string,
+ predicateFunc func(obj extensionsv1alpha1.Object) bool,
+ applyFunc func(ctx context.Context, object extensionsv1alpha1.Object) error,
+) ([]flow.TaskFn, error) {
+ if err := c.List(ctx, listObj, client.InNamespace(namespace)); err != nil {
+ return nil, err
+ }
+
+ fns := make([]flow.TaskFn, 0, meta.LenList(listObj))
+
+ if err := meta.EachListItem(listObj, func(obj runtime.Object) error {
+ o, ok := obj.(extensionsv1alpha1.Object)
+ if !ok {
+ return fmt.Errorf("expected extensionsv1alpha1.Object but got %T", obj)
+ }
+
+ if predicateFunc != nil && !predicateFunc(o) {
+ return nil
+ }
+
+ fns = append(fns, func(ctx context.Context) error {
+ return applyFunc(ctx, o)
+ })
+
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+
+ return fns, nil
+}
+
+func extensionKey(kind, namespace, name string) string {
+ return fmt.Sprintf("%s %s/%s", kind, namespace, name)
+}
+
+func formatErrorMessage(message, description string) string {
+ return fmt.Sprintf("%s: %s", message, description)
+}
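
As a usage sketch for the helpers above, deleting a single extension object could look as follows (the Infrastructure kind, the namespace handling, and the function name are assumptions for illustration):

```go
package example

import (
	"context"

	extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
	"github.com/gardener/gardener/pkg/operation/common"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteInfrastructure is a hypothetical caller: DeleteExtensionCR first confirms
// the deletion via annotation and then deletes the object, ignoring "not found".
func deleteInfrastructure(ctx context.Context, c client.Client, namespace, name string) error {
	return common.DeleteExtensionCR(
		ctx,
		c,
		func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Infrastructure{} },
		namespace,
		name,
	)
}
```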
diff --git a/vendor/github.com/gardener/gardener/pkg/operation/common/managedresources.go b/vendor/github.com/gardener/gardener/pkg/operation/common/managedresources.go
new file mode 100644
index 0000000..5b1bb54
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/operation/common/managedresources.go
@@ -0,0 +1,106 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+
+ "github.com/gardener/gardener-resource-manager/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const (
+ // ManagedResourceLabelKeyOrigin is a key for a label on a managed resource with the value 'origin'.
+ ManagedResourceLabelKeyOrigin = "origin"
+ // ManagedResourceLabelValueGardener is a value for a label on a managed resource with the value 'gardener'.
+ ManagedResourceLabelValueGardener = "gardener"
+
+ // ManagedResourceSecretPrefix is the prefix that is used for secrets referenced by managed resources.
+ ManagedResourceSecretPrefix = "managedresource-"
+)
+
+// DeployManagedResourceForShoot deploys a ManagedResource CR for the shoot's gardener-resource-manager.
+func DeployManagedResourceForShoot(ctx context.Context, c client.Client, name, namespace string, keepObjects bool, data map[string][]byte) error {
+ return deployManagedResource(ctx, c, name, namespace, data, NewManagedResourceForShoot(c, name, namespace, keepObjects))
+}
+
+// DeleteManagedResourceForShoot deletes the ManagedResource CR and its referenced secret for the shoot's gardener-resource-manager.
+func DeleteManagedResourceForShoot(ctx context.Context, c client.Client, name, namespace string) error {
+ return deleteManagedResource(ctx, c, name, namespace, manager.NewManagedResource(c).WithNamespacedName(namespace, name))
+}
+
+// DeployManagedResourceForSeed deploys a ManagedResource CR for the seed's gardener-resource-manager.
+func DeployManagedResourceForSeed(ctx context.Context, c client.Client, name, namespace string, keepObjects bool, data map[string][]byte) error {
+ return deployManagedResource(ctx, c, name, namespace, data, NewManagedResourceForSeed(c, name, namespace, keepObjects))
+}
+
+// DeleteManagedResourceForSeed deletes the ManagedResource CR and its referenced secret for the seed's gardener-resource-manager.
+func DeleteManagedResourceForSeed(ctx context.Context, c client.Client, name, namespace string) error {
+ return deleteManagedResource(ctx, c, name, namespace, manager.NewManagedResource(c).WithNamespacedName(namespace, name))
+}
+
+func deployManagedResource(ctx context.Context, c client.Client, name, namespace string, data map[string][]byte, managedResource *manager.ManagedResource) error {
+ secretName, secret := NewManagedResourceSecret(c, name, namespace)
+
+ if err := secret.WithKeyValues(data).Reconcile(ctx); err != nil {
+ return err
+ }
+
+ return managedResource.WithSecretRef(secretName).Reconcile(ctx)
+}
+
+func deleteManagedResource(ctx context.Context, c client.Client, name, namespace string, managedResource *manager.ManagedResource) error {
+ _, secret := NewManagedResourceSecret(c, name, namespace)
+
+ if err := managedResource.Delete(ctx); err != nil {
+ return err
+ }
+ return secret.Delete(ctx)
+}
+
+// NewManagedResourceSecret constructs a new Secret object containing manifests managed by the Gardener-Resource-Manager
+// which can be reconciled.
+func NewManagedResourceSecret(c client.Client, name, namespace string) (string, *manager.Secret) {
+ secretName := ManagedResourceSecretName(name)
+ return secretName, manager.NewSecret(c).WithNamespacedName(namespace, secretName)
+}
+
+// NewManagedResourceForShoot constructs a new ManagedResource object for the shoot's Gardener-Resource-Manager.
+func NewManagedResourceForShoot(c client.Client, name, namespace string, keepObjects bool) *manager.ManagedResource {
+ var (
+ injectedLabels = map[string]string{ShootNoCleanup: "true"}
+ labels = map[string]string{ManagedResourceLabelKeyOrigin: ManagedResourceLabelValueGardener}
+ )
+
+ return manager.NewManagedResource(c).
+ WithNamespacedName(namespace, name).
+ WithLabels(labels).
+ WithInjectedLabels(injectedLabels).
+ KeepObjects(keepObjects)
+}
+
+// NewManagedResourceForSeed constructs a new ManagedResource object for the seed's Gardener-Resource-Manager.
+func NewManagedResourceForSeed(c client.Client, name, namespace string, keepObjects bool) *manager.ManagedResource {
+ return manager.NewManagedResource(c).
+ WithNamespacedName(namespace, name).
+ WithClass("seed").
+ KeepObjects(keepObjects)
+}
+
+// ManagedResourceSecretName returns the name of a corev1.Secret for the given name of a
+// resourcesv1alpha1.ManagedResource.
+func ManagedResourceSecretName(managedResourceName string) string {
+ return ManagedResourceSecretPrefix + managedResourceName
+}
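
A brief sketch of how these helpers are typically called from a controller (resource name, namespace, and manifest map are placeholders):

```go
package example

import (
	"context"

	"github.com/gardener/gardener/pkg/operation/common"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deploySeedResources is a hypothetical caller: the manifests end up in the
// "managedresource-<name>" secret, and a seed-class ManagedResource references it.
func deploySeedResources(ctx context.Context, c client.Client, namespace string, manifests map[string][]byte) error {
	return common.DeployManagedResourceForSeed(ctx, c, "example-resources", namespace, false, manifests)
}
```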
diff --git a/vendor/github.com/gardener/gardener/pkg/operation/common/network_policies.go b/vendor/github.com/gardener/gardener/pkg/operation/common/network_policies.go
new file mode 100644
index 0000000..7d54ccf
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/operation/common/network_policies.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "net"
+)
+
+// Private8BitBlock returns a private network (RFC1918) 10.0.0.0/8 IPv4 block
+func Private8BitBlock() *net.IPNet {
+ return &net.IPNet{IP: net.IP{10, 0, 0, 0}, Mask: net.CIDRMask(8, 32)}
+}
+
+// Private12BitBlock returns a private network (RFC1918) 172.16.0.0/12 IPv4 block
+func Private12BitBlock() *net.IPNet {
+ return &net.IPNet{IP: net.IP{172, 16, 0, 0}, Mask: net.CIDRMask(12, 32)}
+}
+
+// Private16BitBlock returns a private network (RFC1918) 192.168.0.0/16 IPv4 block
+func Private16BitBlock() *net.IPNet {
+ return &net.IPNet{IP: net.IP{192, 168, 0, 0}, Mask: net.CIDRMask(16, 32)}
+}
+
+// CarrierGradeNATBlock returns a Carrier-grade NAT (RFC6598) 100.64.0.0/10 IPv4 block
+func CarrierGradeNATBlock() *net.IPNet {
+ return &net.IPNet{IP: net.IP{100, 64, 0, 0}, Mask: net.CIDRMask(10, 32)}
+}
+
+// AllPrivateNetworkBlocks returns a list of all Private network (RFC1918) and
+// Carrier-grade NAT (RFC6598) IPv4 blocks.
+func AllPrivateNetworkBlocks() []net.IPNet {
+ return []net.IPNet{
+ *Private8BitBlock(),
+ *Private12BitBlock(),
+ *Private16BitBlock(),
+ *CarrierGradeNATBlock(),
+ }
+}
+
+// ToExceptNetworks returns a list of maps with `network` key containing one of `networks`
+// and `except` key containing the list of `cidr`s which are part of that network.
+//
+// Calling
+// `ToExceptNetworks(AllPrivateNetworkBlocks(),"10.10.0.0/24","172.16.1.0/24","192.168.1.0/24","100.64.1.0/24")`
+// produces:
+//
+// [
+// {"network": "10.0.0.0/8", "except": ["10.10.0.0/24"]},
+// {"network": "172.16.0.0/12", "except": ["172.16.1.0/24"]},
+// {"network": "192.168.0.0/16", "except": ["192.168.1.0/24"]},
+// {"network": "100.64.0.0/10", "except": ["100.64.1.0/24"]},
+// ]
+func ToExceptNetworks(networks []net.IPNet, except ...string) ([]interface{}, error) {
+ result := []interface{}{}
+
+ for _, n := range networks {
+ excluded, err := excludeBlock(&n, except...)
+ if err != nil {
+ return nil, err
+ }
+
+ result = append(result, map[string]interface{}{
+ "network": n.String(),
+ "except": excluded,
+ })
+ }
+ return result, nil
+}
+
+// ExceptNetworks returns a list of maps with `network` key containing one of `networks`
+// and `except` key containing the list of `cidr`s which are part of that network.
+//
+// Calling
+// `ExceptNetworks([]garden.CIDR{"10.0.0.0/8","172.16.0.0/12"},"10.10.0.0/24","172.16.1.0/24")`
+// produces:
+//
+// [
+// {"network": "10.0.0.0/8", "except": ["10.10.0.0/24"]},
+// {"network": "172.16.0.0/12", "except": ["172.16.1.0/24"]},
+// ]
+func ExceptNetworks(networks []string, except ...string) ([]interface{}, error) {
+ ipNets := []net.IPNet{}
+ for _, n := range networks {
+ _, net, err := net.ParseCIDR(string(n))
+ if err != nil {
+ return nil, err
+ }
+ ipNets = append(ipNets, *net)
+ }
+ return ToExceptNetworks(ipNets, except...)
+}
+
+func excludeBlock(parentBlock *net.IPNet, cidrs ...string) ([]string, error) {
+ matchedCIDRs := []string{}
+
+ for _, cidr := range cidrs {
+ ip, _, err := net.ParseCIDR(string(cidr))
+ if err != nil {
+ return matchedCIDRs, err
+ }
+ if parentBlock.Contains(ip) {
+ matchedCIDRs = append(matchedCIDRs, cidr)
+ }
+ }
+ return matchedCIDRs, nil
+}
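
The example from the ToExceptNetworks doc comment can be reproduced with a few lines (the wrapper function is illustrative):

```go
package example

import (
	"fmt"

	"github.com/gardener/gardener/pkg/operation/common"
)

// printExceptNetworks pairs each private block with the given CIDRs that fall inside it,
// matching the example shown in the ToExceptNetworks documentation.
func printExceptNetworks() error {
	result, err := common.ToExceptNetworks(
		common.AllPrivateNetworkBlocks(),
		"10.10.0.0/24", "172.16.1.0/24", "192.168.1.0/24", "100.64.1.0/24",
	)
	if err != nil {
		return err
	}
	fmt.Println(result)
	return nil
}
```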
diff --git a/vendor/github.com/gardener/gardener/pkg/operation/common/types.go b/vendor/github.com/gardener/gardener/pkg/operation/common/types.go
new file mode 100644
index 0000000..f8bf694
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/operation/common/types.go
@@ -0,0 +1,507 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "time"
+
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+const (
+ // VPNTunnel dictates that VPN is used as a tunnel between seed and shoot networks.
+ VPNTunnel string = "vpn-shoot"
+
+ // KonnectivityTunnel dictates that a konnectivity proxy is used as a tunnel between seed and shoot networks.
+ KonnectivityTunnel string = "konnectivity-agent"
+
+ // BasicAuthSecretName is the name of the secret containing basic authentication credentials for the kube-apiserver.
+ BasicAuthSecretName = "kube-apiserver-basic-auth"
+
+ // ChartPath is the path to the Helm charts.
+ ChartPath = "charts"
+
+ // CloudConfigPrefix is a constant for the prefix which is added to secret storing the original cloud config (which
+ // is being downloaded from the cloud-config-downloader process)
+ CloudConfigPrefix = "cloud-config"
+
+ // CloudConfigFilePath is the path on the shoot worker nodes to which the operating system specific configuration
+ // will be downloaded.
+ CloudConfigFilePath = "/var/lib/cloud-config-downloader/downloads/cloud_config"
+
+	// ConfirmationDeletion is an annotation on Shoot and Project resources whose value must be set to "true" in order to
+ // allow deleting the resource (if the annotation is not set any DELETE request will be denied).
+ ConfirmationDeletion = "confirmation.gardener.cloud/deletion"
+
+ // ControllerManagerInternalConfigMapName is the name of the internal config map in which the Gardener controller
+ // manager stores its configuration.
+ ControllerManagerInternalConfigMapName = "gardener-controller-manager-internal-config"
+
+ // DNSProvider is the key for an annotation on a Kubernetes Secret object whose value must point to a valid
+ // DNS provider.
+ DNSProvider = "dns.gardener.cloud/provider"
+
+ // DNSDomain is the key for an annotation on a Kubernetes Secret object whose value must point to a valid
+ // domain name.
+ DNSDomain = "dns.gardener.cloud/domain"
+
+ // DNSIncludeZones is the key for an annotation on a Kubernetes Secret object whose value must point to a list
+ // of zones that shall be included.
+ DNSIncludeZones = "dns.gardener.cloud/include-zones"
+
+ // DNSExcludeZones is the key for an annotation on a Kubernetes Secret object whose value must point to a list
+ // of zones that shall be excluded.
+ DNSExcludeZones = "dns.gardener.cloud/exclude-zones"
+
+ // EtcdEncryptionSecretName is the name of the shoot-specific secret which contains
+ // that shoot's EncryptionConfiguration. The EncryptionConfiguration contains a key
+ // which the shoot's apiserver uses for encrypting selected etcd content.
+ // Should match charts/seed-controlplane/charts/kube-apiserver/templates/deployment.yaml
+ EtcdEncryptionSecretName = "etcd-encryption-secret"
+
+ // EtcdEncryptionSecretFileName is the name of the file within the EncryptionConfiguration
+ // which is made available as volume mount to the shoot's apiserver.
+ // Should match charts/seed-controlplane/charts/kube-apiserver/templates/deployment.yaml
+ EtcdEncryptionSecretFileName = "encryption-configuration.yaml"
+
+ // EtcdEncryptionChecksumAnnotationName is the name of the annotation with which to annotate
+ // the EncryptionConfiguration secret to denote the checksum of the EncryptionConfiguration
+ // that was used when last rewriting secrets.
+ EtcdEncryptionChecksumAnnotationName = "shoot.gardener.cloud/etcd-encryption-configuration-checksum"
+
+ // EtcdEncryptionChecksumLabelName is the name of the label which is added to the shoot
+ // secrets after rewriting them to ensure that successfully rewritten secrets are not
+ // (unnecessarily) rewritten during each reconciliation.
+ EtcdEncryptionChecksumLabelName = "shoot.gardener.cloud/etcd-encryption-configuration-checksum"
+
+ // EtcdEncryptionForcePlaintextAnnotationName is the name of the annotation with which to annotate
+ // the EncryptionConfiguration secret to force the decryption of shoot secrets
+ EtcdEncryptionForcePlaintextAnnotationName = "shoot.gardener.cloud/etcd-encryption-force-plaintext-secrets"
+
+ // EtcdEncryptionEncryptedResourceSecrets is the name of the secret resource to be encrypted
+ EtcdEncryptionEncryptedResourceSecrets = "secrets"
+
+ // EtcdEncryptionKeyPrefix is the prefix for the key name of the EncryptionConfiguration's key
+ EtcdEncryptionKeyPrefix = "key"
+
+ // EtcdEncryptionKeySecretLen is the expected length in bytes of the EncryptionConfiguration's key
+ EtcdEncryptionKeySecretLen = 32
+
+ // GardenerDeletionProtected is a label on CustomResourceDefinitions indicating that the deletion is protected, i.e.
+ // it must be confirmed with the `confirmation.gardener.cloud/deletion=true` annotation before a `DELETE` call
+ // is accepted.
+ GardenerDeletionProtected = "gardener.cloud/deletion-protected"
+
+ // ETCDEncryptionConfigDataName is the name of ShootState data entry holding the current key and encryption state used to encrypt shoot resources
+ ETCDEncryptionConfigDataName = "etcdEncryptionConfiguration"
+
+ // GardenRoleDefaultDomain is the value of the GardenRole key indicating type 'default-domain'.
+ GardenRoleDefaultDomain = "default-domain"
+
+ // GardenRoleInternalDomain is the value of the GardenRole key indicating type 'internal-domain'.
+ GardenRoleInternalDomain = "internal-domain"
+
+ // GardenRoleOpenVPNDiffieHellman is the value of the GardenRole key indicating type 'openvpn-diffie-hellman'.
+ GardenRoleOpenVPNDiffieHellman = "openvpn-diffie-hellman"
+
+ // GardenRoleGlobalMonitoring is the value of the GardenRole key indicating type 'global-monitoring'
+ GardenRoleGlobalMonitoring = "global-monitoring"
+
+ // GardenRoleAlerting is the value of GardenRole key indicating type 'alerting'.
+ GardenRoleAlerting = "alerting"
+
+ // GardenRoleHvpa is the value of GardenRole key indicating type 'hvpa'.
+ GardenRoleHvpa = "hvpa"
+
+	// GardenCreatedBy is the key for an annotation of a Shoot cluster whose value contains the username
+ // of the user that created the resource.
+ GardenCreatedBy = "gardener.cloud/created-by"
+
+ // GrafanaOperatorsPrefix is a constant for a prefix used for the operators Grafana instance.
+ GrafanaOperatorsPrefix = "go"
+
+ // GrafanaUsersPrefix is a constant for a prefix used for the users Grafana instance.
+ GrafanaUsersPrefix = "gu"
+
+ // GrafanaOperatorsRole is a constant for the operators role.
+ GrafanaOperatorsRole = "operators"
+
+ // GrafanaUsersRole is a constant for the users role.
+ GrafanaUsersRole = "users"
+
+ // PrometheusPrefix is a constant for a prefix used for the Prometheus instance.
+ PrometheusPrefix = "p"
+
+ // AlertManagerPrefix is a constant for a prefix used for the AlertManager instance.
+ AlertManagerPrefix = "au"
+
+ // IngressPrefix is the part of a FQDN which will be used to construct the domain name for an ingress controller of
+ // a Shoot cluster. For example, when a Shoot specifies domain 'cluster.example.com', the ingress domain would be
+	// '*.ingress.cluster.example.com'.
+ IngressPrefix = "ingress"
+
+ // APIServerPrefix is the part of a FQDN which will be used to construct the domain name for the kube-apiserver of
+ // a Shoot cluster. For example, when a Shoot specifies domain 'cluster.example.com', the apiserver domain would be
+ // 'api.cluster.example.com'.
+ APIServerPrefix = "api"
+
+ // InternalDomainKey is a key which must be present in an internal domain constructed for a Shoot cluster. If the
+ // configured internal domain already contains it, it won't be added twice. If it does not contain it, it will be
+ // appended.
+ InternalDomainKey = "internal"
+
+ // KubeControllerManagerServerName is the name of the kube-controller-manager server.
+ KubeControllerManagerServerName = "kube-controller-manager-server"
+
+ // KonnectivityServerCertName is the name of the api-proxy konnectivity-server
+ KonnectivityServerCertName = "konnectivity-server"
+
+ // CoreDNSDeploymentName is the name of the coredns deployment.
+ CoreDNSDeploymentName = "coredns"
+
+ // VPNShootDeploymentName is the name of the vpn-shoot deployment.
+ VPNShootDeploymentName = "vpn-shoot"
+
+ // KubeProxyDaemonSetName is the name of the kube-proxy daemon set.
+ KubeProxyDaemonSetName = "kube-proxy"
+
+ // NodeProblemDetectorDaemonSetName is the name of the node-problem-detector daemon set.
+ NodeProblemDetectorDaemonSetName = "node-problem-detector"
+
+ // BlackboxExporterDeploymentName is the name of the blackbox-exporter deployment.
+ BlackboxExporterDeploymentName = "blackbox-exporter"
+
+ // NodeExporterDaemonSetName is the name of the node-exporter daemon set.
+ NodeExporterDaemonSetName = "node-exporter"
+
+	// KubecfgUsername is the username for the token used in the kubeconfig of the shoot.
+ KubecfgUsername = "system:cluster-admin"
+
+ // KubecfgSecretName is the name of the kubecfg secret.
+ KubecfgSecretName = "kubecfg"
+
+ // DependencyWatchdogExternalProbeSecretName is the name of the kubecfg secret with internal DNS for external access.
+ DependencyWatchdogExternalProbeSecretName = "dependency-watchdog-external-probe"
+
+ // DependencyWatchdogInternalProbeSecretName is the name of the kubecfg secret with cluster IP access.
+ DependencyWatchdogInternalProbeSecretName = "dependency-watchdog-internal-probe"
+
+ // DependencyWatchdogUserName is the user name of the dependency-watchdog.
+ DependencyWatchdogUserName = "gardener.cloud:system:dependency-watchdog"
+
+ // KubeAPIServerHealthCheck is a key for the kube-apiserver-health-check user.
+ KubeAPIServerHealthCheck = "kube-apiserver-health-check"
+
+ // StaticTokenSecretName is the name of the secret containing static tokens for the kube-apiserver.
+ StaticTokenSecretName = "static-token"
+
+ // VPASecretName is the name of the secret used by VPA
+ VPASecretName = "vpa-tls-certs"
+
+ // ProjectPrefix is the prefix of namespaces representing projects.
+ ProjectPrefix = "garden-"
+
+ // ProjectName is the key of a label on namespaces whose value holds the project name.
+ ProjectName = "project.gardener.cloud/name"
+
+ // ProjectSkipStaleCheck is the key of an annotation on a project namespace that marks the associated Project to be
+ // skipped by the stale project controller. If the project has already configured stale timestamps in its status
+ // then they will be reset.
+ ProjectSkipStaleCheck = "project.gardener.cloud/skip-stale-check"
+
+ // NamespaceProject is the key of an annotation on namespace whose value holds the project uid.
+ NamespaceProject = "namespace.gardener.cloud/project"
+
+ // NamespaceKeepAfterProjectDeletion is a constant for an annotation on a `Namespace` resource that states that it
+ // should not be deleted if the corresponding `Project` gets deleted. Please note that all project related labels
+ // from the namespace will be removed when the project is being deleted.
+ NamespaceKeepAfterProjectDeletion = "namespace.gardener.cloud/keep-after-project-deletion"
+
+ // ShootAlphaScalingAPIServerClass is a constant for an annotation on the shoot stating the initial API server class.
+ // It influences the size of the initial resource requests/limits.
+ // Possible values are [small, medium, large, xlarge, 2xlarge].
+	// Note that this annotation is alpha and can be removed anytime without further notice. Only use it if you know
+	// what you are doing.
+ ShootAlphaScalingAPIServerClass = "alpha.kube-apiserver.scaling.shoot.gardener.cloud/class"
+
+ // ShootExpirationTimestamp is an annotation on a Shoot resource whose value represents the time when the Shoot lifetime
+ // is expired. The lifetime can be extended, but at most by the minimal value of the 'clusterLifetimeDays' property
+ // of referenced quotas.
+ ShootExpirationTimestamp = "shoot.gardener.cloud/expiration-timestamp"
+
+ // ShootNoCleanup is a constant for a label on a resource indicating that the Gardener cleaner should not delete this
+ // resource when cleaning a shoot during the deletion flow.
+ ShootNoCleanup = "shoot.gardener.cloud/no-cleanup"
+
+	// ShootStatus is a constant for a label on a Shoot resource indicating the Shoot's health.
+ ShootStatus = "shoot.gardener.cloud/status"
+
+ // ShootOperationMaintain is a constant for an annotation on a Shoot indicating that the Shoot maintenance shall be executed as soon as
+ // possible.
+ ShootOperationMaintain = "maintain"
+
+ // FailedShootNeedsRetryOperation is a constant for an annotation on a Shoot in a failed state indicating that a retry operation should be triggered during the next maintenance time window.
+ FailedShootNeedsRetryOperation = "maintenance.shoot.gardener.cloud/needs-retry-operation"
+
+ // ShootOperationRotateKubeconfigCredentials is a constant for an annotation on a Shoot indicating that the credentials contained in the
+ // kubeconfig that is handed out to the user shall be rotated.
+ ShootOperationRotateKubeconfigCredentials = "rotate-kubeconfig-credentials"
+
+ // ShootTasks is a constant for an annotation on a Shoot which states that certain tasks should be done.
+ ShootTasks = "shoot.gardener.cloud/tasks"
+
+ // ShootTaskDeployInfrastructure is a name for a Shoot's infrastructure deployment task. It indicates that the
+ // Infrastructure extension resource shall be reconciled.
+ ShootTaskDeployInfrastructure = "deployInfrastructure"
+
+ // ShootTaskRestartControlPlanePods is a name for a Shoot task which is dedicated to restart related control plane pods.
+ ShootTaskRestartControlPlanePods = "restartControlPlanePods"
+
+ // ShootOperationRetry is a constant for an annotation on a Shoot indicating that a failed Shoot reconciliation shall be retried.
+ ShootOperationRetry = "retry"
+
+ // ShootOperationReconcile is a constant for an annotation on a Shoot indicating that a Shoot reconciliation shall be triggered.
+ ShootOperationReconcile = "reconcile"
+
+ // ShootSyncPeriod is a constant for an annotation on a Shoot which may be used to overwrite the global Shoot controller sync period.
+	// The value must be a duration. It can also be used to disable the reconciliation entirely by setting it to 0m. Disabling the reconciliation
+	// only means that the periodic reconciliation is disabled. However, when the Gardener is restarted/redeployed or the specification is
+	// changed, the reconciliation flow will still be executed.
+ ShootSyncPeriod = "shoot.gardener.cloud/sync-period"
+
+ // ShootIgnore is a constant for an annotation on a Shoot which may be used to tell the Gardener that the Shoot with this name should be
+ // ignored completely. That means that the Shoot will never reach the reconciliation flow (independent of the operation (create/update/
+ // delete)).
+ ShootIgnore = "shoot.gardener.cloud/ignore"
+
+ // ManagedResourceShootCoreName is the name of the shoot core managed resource.
+ ManagedResourceShootCoreName = "shoot-core"
+
+ // ManagedResourceAddonsName is the name of the addons managed resource.
+ ManagedResourceAddonsName = "addons"
+
+ // GardenerResourceManagerImageName is the name of the GardenerResourceManager image.
+ GardenerResourceManagerImageName = "gardener-resource-manager"
+
+ // GardenerSeedAdmissionControllerImageName is the name of the GardenerSeedAdmissionController image.
+ GardenerSeedAdmissionControllerImageName = "gardener-seed-admission-controller"
+
+ // CoreDNSImageName is the name of the CoreDNS image.
+ CoreDNSImageName = "coredns"
+
+ // NodeLocalDNSImageName is the name of the node-local-dns image.
+ NodeLocalDNSImageName = "node-local-dns"
+
+ // NodeProblemDetectorImageName is the name of the node-problem-detector image.
+ NodeProblemDetectorImageName = "node-problem-detector"
+
+ // KubeAPIServerImageName is the name of the kube-apiserver image.
+ KubeAPIServerImageName = "kube-apiserver"
+
+ // KubeControllerManagerImageName is the name of the kube-controller-manager image.
+ KubeControllerManagerImageName = "kube-controller-manager"
+
+ // KubeSchedulerImageName is the name of the kube-scheduler image.
+ KubeSchedulerImageName = "kube-scheduler"
+
+ // KubeProxyImageName is the name of the kube-proxy image.
+ KubeProxyImageName = "kube-proxy"
+
+ // HyperkubeImageName is the name of the hyperkube image (used for kubectl + kubelet on the worker nodes).
+ HyperkubeImageName = "hyperkube"
+
+ // MetricsServerImageName is the name of the MetricsServer image.
+ MetricsServerImageName = "metrics-server"
+
+ // VPNShootImageName is the name of the VPNShoot image.
+ VPNShootImageName = "vpn-shoot"
+
+ // VPNSeedImageName is the name of the VPNSeed image.
+ VPNSeedImageName = "vpn-seed"
+
+ // KonnectivityServerImageName is the name of the konnectivity server image.
+ KonnectivityServerImageName = "konnectivity-server"
+
+ // KonnectivityServerUserName is the user name of the konnectivity server used for the token
+ KonnectivityServerUserName = "system:konnectivity-server"
+
+ // KonnectivityServerKubeconfig is the name of the konnectivity-server kubeconfig
+ KonnectivityServerKubeconfig = "konnectivity-server-kubeconfig"
+
+ // KonnectivityAgentImageName is the name of the konnectivity agent image.
+ KonnectivityAgentImageName = "konnectivity-agent"
+
+ // NodeExporterImageName is the name of the NodeExporter image.
+ NodeExporterImageName = "node-exporter"
+
+ // KubernetesDashboardImageName is the name of the kubernetes-dashboard image.
+ KubernetesDashboardImageName = "kubernetes-dashboard"
+
+ // KubernetesDashboardMetricsScraperImageName is the name of the kubernetes-dashboard-metrics-scraper image.
+ KubernetesDashboardMetricsScraperImageName = "kubernetes-dashboard-metrics-scraper"
+
+ // BusyboxImageName is the name of the Busybox image.
+ BusyboxImageName = "busybox"
+
+ // NginxIngressControllerImageName is the name of the NginxIngressController image.
+ NginxIngressControllerImageName = "nginx-ingress-controller"
+
+	// NginxIngressControllerSeedImageName is the name of the NginxIngressController image used in the seed.
+ NginxIngressControllerSeedImageName = "nginx-ingress-controller-seed"
+
+ // IngressDefaultBackendImageName is the name of the IngressDefaultBackend image.
+ IngressDefaultBackendImageName = "ingress-default-backend"
+
+ // ClusterAutoscalerImageName is the name of the ClusterAutoscaler image.
+ ClusterAutoscalerImageName = "cluster-autoscaler"
+
+ // AlertManagerImageName is the name of the AlertManager image.
+ AlertManagerImageName = "alertmanager"
+
+ // ConfigMapReloaderImageName is the name of the ConfigMapReloader image.
+ ConfigMapReloaderImageName = "configmap-reloader"
+
+ // GrafanaImageName is the name of the Grafana image.
+ GrafanaImageName = "grafana"
+
+ // PrometheusImageName is the name of the Prometheus image.
+ PrometheusImageName = "prometheus"
+
+ // BlackboxExporterImageName is the name of the BlackboxExporter image.
+ BlackboxExporterImageName = "blackbox-exporter"
+
+ // KubeStateMetricsImageName is the name of the KubeStateMetrics image.
+ KubeStateMetricsImageName = "kube-state-metrics"
+
+	// EtcdDruidImageName is the name of the Etcd Druid image.
+ EtcdDruidImageName = "etcd-druid"
+
+ // PauseContainerImageName is the name of the PauseContainer image.
+ PauseContainerImageName = "pause-container"
+
+ // LokiImageName is the name of the Loki image used for logging
+ LokiImageName = "loki"
+
+	// FluentBitImageName is the name of the Fluent-bit image.
+	FluentBitImageName = "fluent-bit"
+
+	// FluentBitPluginInstaller is the name of the Fluent-bit plugin installer image.
+	FluentBitPluginInstaller = "fluent-bit-plugin-installer"
+
+	// AlpineImageName is the name of the alpine image.
+	AlpineImageName = "alpine"
+
+	// AlpineIptablesImageName is the name of the alpine image with iptables pre-installed.
+	AlpineIptablesImageName = "alpine-iptables"
+
+ // SeedSpecHash is a constant for a label on `ControllerInstallation`s (similar to `pod-template-hash` on `Pod`s).
+ SeedSpecHash = "seed-spec-hash"
+
+ // RegistrationSpecHash is a constant for a label on `ControllerInstallation`s (similar to `pod-template-hash` on `Pod`s).
+ RegistrationSpecHash = "registration-spec-hash"
+
+ // VpaAdmissionControllerImageName is the name of the vpa-admission-controller image
+ VpaAdmissionControllerImageName = "vpa-admission-controller"
+ // VpaRecommenderImageName is the name of the vpa-recommender image
+ VpaRecommenderImageName = "vpa-recommender"
+ // VpaUpdaterImageName is the name of the vpa-updater image
+ VpaUpdaterImageName = "vpa-updater"
+ // VpaExporterImageName is the name of the vpa-exporter image
+ VpaExporterImageName = "vpa-exporter"
+	// VpaAdmissionControllerName is the name of the vpa-admission-controller.
+	VpaAdmissionControllerName = "gardener.cloud:vpa:admission-controller"
+	// VpaRecommenderName is the name of the vpa-recommender.
+	VpaRecommenderName = "gardener.cloud:vpa:recommender"
+	// VpaUpdaterName is the name of the vpa-updater.
+	VpaUpdaterName = "gardener.cloud:vpa:updater"
+	// VpaExporterName is the name of the vpa-exporter.
+	VpaExporterName = "gardener.cloud:vpa:exporter"
+
+ // HvpaControllerImageName is the name of the hvpa-controller image
+ HvpaControllerImageName = "hvpa-controller"
+
+ // DependencyWatchdogImageName is the name of the dependency-watchdog image
+ DependencyWatchdogImageName = "dependency-watchdog"
+
+	// IstioProxyImageName is the name of the Istio proxy image.
+	IstioProxyImageName = "istio-proxy"
+
+	// IstioIstiodImageName is the name of the Istio istiod image.
+	IstioIstiodImageName = "istio-istiod"
+
+ // IstioNamespace is the istio-system namespace
+ IstioNamespace = "istio-system"
+
+	// APIServerProxyImageName is the name of the apiserver-proxy image.
+	APIServerProxyImageName = "apiserver-proxy"
+
+	// APIServerProxySidecarImageName is the name of the apiserver-proxy sidecar image.
+	APIServerProxySidecarImageName = "apiserver-proxy-sidecar"
+
+	// APIServerProxyPodMutatorWebhookImageName is the name of the apiserver-proxy pod mutator webhook image.
+	APIServerProxyPodMutatorWebhookImageName = "apiserver-proxy-pod-webhook"
+
+ // ServiceAccountSigningKeySecretDataKey is the data key of a signing key Kubernetes secret.
+ ServiceAccountSigningKeySecretDataKey = "signing-key"
+
+ // ControlPlaneWildcardCert is the value of the GardenRole key indicating type 'controlplane-cert'.
+ // It refers to a wildcard tls certificate which can be used for services exposed under the corresponding domain.
+ ControlPlaneWildcardCert = "controlplane-cert"
+
+ // AlertManagerTLS is the name of the secret resource which holds the TLS certificate for Alert Manager.
+ AlertManagerTLS = "alertmanager-tls"
+ // GrafanaTLS is the name of the secret resource which holds the TLS certificate for Grafana.
+ GrafanaTLS = "grafana-tls"
+ // PrometheusTLS is the name of the secret resource which holds the TLS certificate for Prometheus.
+ PrometheusTLS = "prometheus-tls"
+
+ // EndUserCrtValidity is the time period a user facing certificate is valid.
+ EndUserCrtValidity = 730 * 24 * time.Hour // ~2 years, see https://support.apple.com/en-us/HT210176
+
+ // ShootDNSIngressName is a constant for the DNS resources used for the shoot ingress addon.
+ ShootDNSIngressName = "ingress"
+)
+
+var (
+ // RequiredControlPlaneDeployments is a set of the required shoot control plane deployments
+ // running in the seed.
+ RequiredControlPlaneDeployments = sets.NewString(
+ v1beta1constants.DeploymentNameGardenerResourceManager,
+ v1beta1constants.DeploymentNameKubeAPIServer,
+ v1beta1constants.DeploymentNameKubeControllerManager,
+ v1beta1constants.DeploymentNameKubeScheduler,
+ )
+
+ // RequiredControlPlaneEtcds is a set of the required shoot control plane etcds
+ // running in the seed.
+ RequiredControlPlaneEtcds = sets.NewString(
+ v1beta1constants.ETCDMain,
+ v1beta1constants.ETCDEvents,
+ )
+
+ // RequiredMonitoringSeedDeployments is a set of the required seed monitoring deployments.
+ RequiredMonitoringSeedDeployments = sets.NewString(
+ v1beta1constants.DeploymentNameGrafanaOperators,
+ v1beta1constants.DeploymentNameGrafanaUsers,
+ v1beta1constants.DeploymentNameKubeStateMetricsShoot,
+ )
+
+ // RequiredLoggingStatefulSets is a set of the required logging stateful sets.
+ RequiredLoggingStatefulSets = sets.NewString(
+ v1beta1constants.StatefulSetNameLoki,
+ )
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/operation/common/utils.go b/vendor/github.com/gardener/gardener/pkg/operation/common/utils.go
new file mode 100644
index 0000000..f838ebd
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/operation/common/utils.go
@@ -0,0 +1,779 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "net"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ gardencorelisters "github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1"
+ "github.com/gardener/gardener/pkg/client/kubernetes"
+ "github.com/gardener/gardener/pkg/utils"
+ kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
+ "github.com/gardener/gardener/pkg/version"
+
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ appsv1 "k8s.io/api/apps/v1"
+ autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
+ batchv1beta1 "k8s.io/api/batch/v1beta1"
+ corev1 "k8s.io/api/core/v1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ networkingv1 "k8s.io/api/networking/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ autoscalingv1beta2 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2"
+ "k8s.io/client-go/util/retry"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// TimeNow returns the current time. Exposed for testing.
+var TimeNow = time.Now
+
+// GetSecretKeysWithPrefix returns a list of keys of the given map which are prefixed with the given kind.
+func GetSecretKeysWithPrefix(kind string, m map[string]*corev1.Secret) []string {
+ var result []string
+ for key := range m {
+ if strings.HasPrefix(key, kind) {
+ result = append(result, key)
+ }
+ }
+ return result
+}
+
+// ComputeOffsetIP parses the provided subnet and offsets it by the given offset.
+// For example, for subnet = 100.64.0.0/11 and offset = 10 the result would be 100.64.0.10.
+// Both IPv4 and IPv6 are supported.
+func ComputeOffsetIP(subnet *net.IPNet, offset int64) (net.IP, error) {
+ if subnet == nil {
+ return nil, fmt.Errorf("subnet is nil")
+ }
+
+ isIPv6 := false
+
+ bytes := subnet.IP.To4()
+ if bytes == nil {
+ isIPv6 = true
+ bytes = subnet.IP.To16()
+ }
+
+ ip := net.IP(big.NewInt(0).Add(big.NewInt(0).SetBytes(bytes), big.NewInt(offset)).Bytes())
+
+ if !subnet.Contains(ip) {
+ return nil, fmt.Errorf("cannot compute IP with offset %d - subnet %q too small", offset, subnet)
+ }
+
+ // there is no broadcast address on IPv6
+ if isIPv6 {
+ return ip, nil
+ }
+
+ for i := range ip {
+ // IP address is not the same, so it's not the broadcast ip.
+ if ip[i] != ip[i]|^subnet.Mask[i] {
+ return ip.To4(), nil
+ }
+ }
+
+ return nil, fmt.Errorf("computed IPv4 address %q is broadcast for subnet %q", ip, subnet)
+}
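+
+// For illustration only, a rough usage sketch (the subnet and offset below are hypothetical):
+//
+//	_, subnet, _ := net.ParseCIDR("100.64.0.0/11")
+//	ip, _ := ComputeOffsetIP(subnet, 10) // expected to yield 100.64.0.10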
+
+// GenerateAddonConfig returns the provided values merged with an "enabled" key in case enabled is true.
+// Otherwise, only the "enabled" key (set to false) is returned.
+func GenerateAddonConfig(values map[string]interface{}, enabled bool) map[string]interface{} {
+ v := map[string]interface{}{
+ "enabled": enabled,
+ }
+ if enabled {
+ for key, value := range values {
+ v[key] = value
+ }
+ }
+ return v
+}
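+
+// For illustration only, a sketch with hypothetical addon values:
+//
+//	GenerateAddonConfig(map[string]interface{}{"replicas": 2}, true)  // {"enabled": true, "replicas": 2}
+//	GenerateAddonConfig(map[string]interface{}{"replicas": 2}, false) // {"enabled": false}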
+
+// GenerateBackupEntryName returns the BackupEntry resource name created from the provided seedNamespace and shootUID.
+func GenerateBackupEntryName(seedNamespace string, shootUID types.UID) string {
+ return fmt.Sprintf("%s--%s", seedNamespace, shootUID)
+}
+
+// ExtractShootDetailsFromBackupEntryName returns the Shoot resource technical ID and its UID from the provided backupEntryName.
+func ExtractShootDetailsFromBackupEntryName(backupEntryName string) (shootTechnicalID, shootUID string) {
+ tokens := strings.Split(backupEntryName, "--")
+ shootUID = tokens[len(tokens)-1]
+ shootTechnicalID = strings.TrimSuffix(backupEntryName, shootUID)
+ shootTechnicalID = strings.TrimSuffix(shootTechnicalID, "--")
+ return shootTechnicalID, shootUID
+}
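+
+// For illustration only, a sketch of the expected round trip (namespace and UID are hypothetical):
+//
+//	name := GenerateBackupEntryName("shoot--foo--bar", "1234-abcd") // "shoot--foo--bar--1234-abcd"
+//	technicalID, uid := ExtractShootDetailsFromBackupEntryName(name)
+//	// technicalID == "shoot--foo--bar", uid == "1234-abcd"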
+
+// IsFollowingNewNamingConvention determines whether the new naming convention is followed for shoot resources.
+// TODO: Remove this and use only "--" as separator, once we have all shoots deployed as per new naming conventions.
+func IsFollowingNewNamingConvention(seedNamespace string) bool {
+ return len(strings.Split(seedNamespace, "--")) > 2
+}
+
+// ReplaceCloudProviderConfigKey replaces a key with the new value in the given cloud provider config.
+func ReplaceCloudProviderConfigKey(cloudProviderConfig, separator, key, value string) string {
+ keyValueRegexp := regexp.MustCompile(fmt.Sprintf(`(\Q%s\E%s)([^\n]*)`, key, separator))
+ return keyValueRegexp.ReplaceAllString(cloudProviderConfig, fmt.Sprintf(`${1}%q`, strings.Replace(value, `$`, `$$`, -1)))
+}
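+
+// For illustration only, a sketch with a hypothetical INI-style config (key, separator and value are made up):
+//
+//	cfg := "tenantID=old\nregion=eu"
+//	cfg = ReplaceCloudProviderConfigKey(cfg, "=", "tenantID", "new")
+//	// the first line is now `tenantID="new"`; the other lines are left untouched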
+
+// ProjectForNamespace returns the project object responsible for a given namespace.
+// It tries to identify the project object by looking for the namespace name in the project spec.
+func ProjectForNamespace(projectLister gardencorelisters.ProjectLister, namespaceName string) (*gardencorev1beta1.Project, error) {
+ projectList, err := projectLister.List(labels.Everything())
+ if err != nil {
+ return nil, err
+ }
+
+ var projects []gardencorev1beta1.Project
+ for _, p := range projectList {
+ projects = append(projects, *p)
+ }
+
+ return projectForNamespace(projects, namespaceName)
+}
+
+// ProjectForNamespaceWithClient returns the project object responsible for a given namespace.
+// It tries to identify the project object by looking for the namespace name in the project spec.
+func ProjectForNamespaceWithClient(ctx context.Context, c client.Client, namespaceName string) (*gardencorev1beta1.Project, error) {
+ projectList := &gardencorev1beta1.ProjectList{}
+ err := c.List(ctx, projectList)
+ if err != nil {
+ return nil, err
+ }
+
+ return projectForNamespace(projectList.Items, namespaceName)
+}
+
+func projectForNamespace(projects []gardencorev1beta1.Project, namespaceName string) (*gardencorev1beta1.Project, error) {
+ for _, project := range projects {
+ if project.Spec.Namespace != nil && *project.Spec.Namespace == namespaceName {
+ return &project, nil
+ }
+ }
+
+ return nil, apierrors.NewNotFound(gardencorev1beta1.Resource("Project"), fmt.Sprintf("for namespace %s", namespaceName))
+}
+
+// ProjectNameForNamespace determines the project name for a given namespace. It first tries to identify it via the namespace's ownerReferences.
+// If that doesn't help, it checks whether the project name is set as a label on the namespace object. If that doesn't help either, the name can be
+// inferred from the namespace name in case it is prefixed with the project prefix. If none of those approaches work, the namespace name itself is
+// returned as the project name.
+func ProjectNameForNamespace(namespace *corev1.Namespace) string {
+ for _, ownerReference := range namespace.OwnerReferences {
+ if ownerReference.Kind == "Project" {
+ return ownerReference.Name
+ }
+ }
+
+ if name, ok := namespace.Labels[ProjectName]; ok {
+ return name
+ }
+
+ if nameSplit := strings.Split(namespace.Name, ProjectPrefix); len(nameSplit) > 1 {
+ return nameSplit[1]
+ }
+
+ return namespace.Name
+}
+
+// GardenerDeletionGracePeriod is the default grace period for Gardener's force deletion methods.
+var GardenerDeletionGracePeriod = 5 * time.Minute
+
+// ShouldObjectBeRemoved determines whether the given object should be gone now.
+// This is calculated by first checking the deletion timestamp of an object: If the deletion timestamp
+// is unset, the object should not be removed - i.e. this returns false.
+// Otherwise, it is checked whether the deletionTimestamp is before the current time minus the
+// grace period.
+func ShouldObjectBeRemoved(obj metav1.Object, gracePeriod time.Duration) bool {
+ deletionTimestamp := obj.GetDeletionTimestamp()
+ if deletionTimestamp == nil {
+ return false
+ }
+
+ return deletionTimestamp.Time.Before(time.Now().Add(-gracePeriod))
+}
+
+// DeleteHvpa deletes all resources required for the HVPA in the given namespace.
+func DeleteHvpa(ctx context.Context, k8sClient kubernetes.Interface, namespace string) error {
+ if k8sClient == nil {
+ return fmt.Errorf("require kubernetes client")
+ }
+
+ listOptions := metav1.ListOptions{
+ LabelSelector: fmt.Sprintf("%s=%s", v1beta1constants.GardenRole, GardenRoleHvpa),
+ }
+
+ // Delete all CRDs with label "gardener.cloud/role=hvpa"
+ // Workaround: Due to https://github.com/gardener/gardener/issues/2257, we first list the HVPA CRDs and then remove
+ // them one by one.
+ crdList, err := k8sClient.APIExtension().ApiextensionsV1beta1().CustomResourceDefinitions().List(ctx, listOptions)
+ if err != nil {
+ return err
+ }
+ for _, crd := range crdList.Items {
+ if err := k8sClient.APIExtension().ApiextensionsV1beta1().CustomResourceDefinitions().Delete(ctx, crd.Name, metav1.DeleteOptions{}); client.IgnoreNotFound(err) != nil {
+ return err
+ }
+ }
+
+ // Delete all Deployments with label "gardener.cloud/role=hvpa"
+ deletePropagation := metav1.DeletePropagationForeground
+ if err := k8sClient.Kubernetes().AppsV1().Deployments(namespace).DeleteCollection(ctx, metav1.DeleteOptions{PropagationPolicy: &deletePropagation}, listOptions); client.IgnoreNotFound(err) != nil {
+ return err
+ }
+
+ // Delete all ClusterRoles with label "gardener.cloud/role=hvpa"
+ if err := k8sClient.Kubernetes().RbacV1().ClusterRoles().DeleteCollection(ctx, metav1.DeleteOptions{}, listOptions); client.IgnoreNotFound(err) != nil {
+ return err
+ }
+
+ // Delete all ClusterRoleBindings with label "gardener.cloud/role=hvpa"
+ if err := k8sClient.Kubernetes().RbacV1().ClusterRoleBindings().DeleteCollection(ctx, metav1.DeleteOptions{}, listOptions); client.IgnoreNotFound(err) != nil {
+ return err
+ }
+
+ // Delete all ServiceAccounts with label "gardener.cloud/role=hvpa"
+ if err := k8sClient.Kubernetes().CoreV1().ServiceAccounts(namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, listOptions); client.IgnoreNotFound(err) != nil {
+ return err
+ }
+
+ return nil
+}
+
+// DeleteVpa deletes all resources required for the VPA in the given namespace.
+func DeleteVpa(ctx context.Context, c client.Client, namespace string, isShoot bool) error {
+ resources := []client.Object{
+ &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: v1beta1constants.DeploymentNameVPAAdmissionController, Namespace: namespace}},
+ &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: v1beta1constants.DeploymentNameVPARecommender, Namespace: namespace}},
+ &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: v1beta1constants.DeploymentNameVPAUpdater, Namespace: namespace}},
+ &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "vpa-webhook", Namespace: namespace}},
+ &autoscalingv1beta2.VerticalPodAutoscaler{ObjectMeta: metav1.ObjectMeta{Name: v1beta1constants.DeploymentNameVPAAdmissionController, Namespace: namespace}},
+ &autoscalingv1beta2.VerticalPodAutoscaler{ObjectMeta: metav1.ObjectMeta{Name: v1beta1constants.DeploymentNameVPARecommender, Namespace: namespace}},
+ &autoscalingv1beta2.VerticalPodAutoscaler{ObjectMeta: metav1.ObjectMeta{Name: v1beta1constants.DeploymentNameVPAUpdater, Namespace: namespace}},
+ }
+
+ if isShoot {
+ resources = append(resources,
+ &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "vpa-admission-controller", Namespace: namespace}},
+ &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "vpa-recommender", Namespace: namespace}},
+ &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: VPASecretName, Namespace: namespace}},
+ &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "vpa-updater", Namespace: namespace}},
+ &networkingv1.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: "allow-kube-apiserver-to-vpa-admission-controller", Namespace: namespace}},
+ )
+ } else {
+ resources = append(resources,
+ &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:vpa:seed:actor"}},
+ &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:vpa:seed:admission-controller"}},
+ &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:vpa:seed:checkpoint-actor"}},
+ &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:vpa:seed:metrics-reader"}},
+ &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:vpa:seed:target-reader"}},
+ &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:vpa:seed:evictioner"}},
+ &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:vpa:seed:actor"}},
+ &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:vpa:seed:admission-controller"}},
+ &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:vpa:seed:checkpoint-actor"}},
+ &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:vpa:seed:metrics-reader"}},
+ &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:vpa:seed:target-reader"}},
+ &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:vpa:seed:evictioner"}},
+ &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "vpa-admission-controller", Namespace: namespace}},
+ &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "vpa-recommender", Namespace: namespace}},
+ &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "vpa-updater", Namespace: namespace}},
+ &admissionregistrationv1beta1.MutatingWebhookConfiguration{ObjectMeta: metav1.ObjectMeta{Name: "vpa-webhook-config-seed"}},
+ )
+ }
+
+ for _, resource := range resources {
+ if err := c.Delete(ctx, resource); client.IgnoreNotFound(err) != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// DeleteLoggingStack deletes all resources of the logging stack in the given namespace.
+func DeleteLoggingStack(ctx context.Context, k8sClient client.Client, namespace string) error {
+ if k8sClient == nil {
+ return errors.New("must provide non-nil kubernetes client to common.DeleteLoggingStack")
+ }
+
+ // Delete the resources below that match "gardener.cloud/role=logging"
+ lists := []client.ObjectList{
+ &corev1.ConfigMapList{},
+ &batchv1beta1.CronJobList{},
+ &rbacv1.ClusterRoleList{},
+ &rbacv1.ClusterRoleBindingList{},
+ &rbacv1.RoleList{},
+ &rbacv1.RoleBindingList{},
+ &appsv1.DaemonSetList{},
+ &appsv1.DeploymentList{},
+		// TODO: Use autoscaling/v2beta2 for Kubernetes 1.19+ shoots once the kubernetes-v1.19 golang dependencies have been vendored.
+ &autoscalingv2beta1.HorizontalPodAutoscalerList{},
+ &extensionsv1beta1.IngressList{},
+ &corev1.SecretList{},
+ &corev1.ServiceAccountList{},
+ &corev1.ServiceList{},
+ &appsv1.StatefulSetList{},
+ }
+
+ for _, list := range lists {
+ if err := k8sClient.List(ctx, list,
+ client.InNamespace(namespace),
+ client.MatchingLabels{v1beta1constants.GardenRole: v1beta1constants.GardenRoleLogging}); err != nil {
+ return err
+ }
+
+ if err := meta.EachListItem(list, func(obj runtime.Object) error {
+ return client.IgnoreNotFound(k8sClient.Delete(ctx, obj.(client.Object), kubernetes.DefaultDeleteOptions...))
+ }); err != nil {
+ return err
+ }
+ }
+
+ pvc := &corev1.PersistentVolumeClaim{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "loki-loki-0",
+ Namespace: namespace,
+ },
+ }
+ if err := k8sClient.Delete(ctx, pvc); client.IgnoreNotFound(err) != nil && !meta.IsNoMatchError(err) {
+ return err
+ }
+
+ return nil
+}
+
+// GetContainerResourcesInStatefulSet returns the resources of the containers in the given StatefulSet.
+func GetContainerResourcesInStatefulSet(ctx context.Context, k8sClient client.Client, key client.ObjectKey) ([]*corev1.ResourceRequirements, error) {
+ statefulSet := &appsv1.StatefulSet{}
+ resourcesPerContainer := make([]*corev1.ResourceRequirements, 0)
+ if err := k8sClient.Get(ctx, key, statefulSet); client.IgnoreNotFound(err) != nil {
+ return nil, err
+ } else if !apierrors.IsNotFound(err) {
+ for _, container := range statefulSet.Spec.Template.Spec.Containers {
+ resourcesPerContainer = append(resourcesPerContainer, container.Resources.DeepCopy())
+ }
+ return resourcesPerContainer, nil
+ }
+
+ // Use the default resources defined in values file
+ return nil, nil
+}
+
+// DeleteReserveExcessCapacity deletes the deployment and priority class for excess capacity
+func DeleteReserveExcessCapacity(ctx context.Context, k8sClient client.Client) error {
+ if k8sClient == nil {
+ return errors.New("must provide non-nil kubernetes client to common.DeleteReserveExcessCapacity")
+ }
+
+ deploy := &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "reserve-excess-capacity",
+ Namespace: v1beta1constants.GardenNamespace,
+ },
+ }
+ if err := k8sClient.Delete(ctx, deploy); client.IgnoreNotFound(err) != nil {
+ return err
+ }
+
+ priorityClass := &schedulingv1beta1.PriorityClass{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "gardener-reserve-excess-capacity",
+ },
+ }
+ return client.IgnoreNotFound(k8sClient.Delete(ctx, priorityClass))
+}
+
+// DeleteAlertmanager deletes all resources of the Alertmanager in a given namespace.
+func DeleteAlertmanager(ctx context.Context, k8sClient client.Client, namespace string) error {
+ objs := []client.Object{
+ &appsv1.StatefulSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: v1beta1constants.StatefulSetNameAlertManager,
+ Namespace: namespace,
+ },
+ },
+ &extensionsv1beta1.Ingress{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "alertmanager",
+ Namespace: namespace,
+ },
+ },
+ &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "alertmanager-client",
+ Namespace: namespace,
+ },
+ },
+ &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "alertmanager",
+ Namespace: namespace,
+ },
+ },
+ &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "alertmanager-basic-auth",
+ Namespace: namespace,
+ },
+ },
+ &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: AlertManagerTLS,
+ Namespace: namespace,
+ },
+ },
+ &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "alertmanager-config",
+ Namespace: namespace,
+ },
+ },
+ &corev1.PersistentVolumeClaim{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "alertmanager-db-alertmanager-0",
+ Namespace: namespace,
+ },
+ },
+ }
+
+ return kutil.DeleteObjects(ctx, k8sClient, objs...)
+}
+
+// DeleteGrafanaByRole deletes the Grafana resources for the given role in the given namespace.
+func DeleteGrafanaByRole(ctx context.Context, k8sClient kubernetes.Interface, namespace, role string) error {
+ if k8sClient == nil {
+ return fmt.Errorf("require kubernetes client")
+ }
+
+ listOptions := metav1.ListOptions{
+ LabelSelector: fmt.Sprintf("%s=%s,%s=%s", "component", "grafana", "role", role),
+ }
+
+ deletePropagation := metav1.DeletePropagationForeground
+ if err := k8sClient.Kubernetes().AppsV1().Deployments(namespace).DeleteCollection(
+ ctx,
+ metav1.DeleteOptions{
+ PropagationPolicy: &deletePropagation,
+ }, listOptions); err != nil && !apierrors.IsNotFound(err) {
+ return err
+ }
+
+ if err := k8sClient.Kubernetes().CoreV1().ConfigMaps(namespace).DeleteCollection(
+ ctx, metav1.DeleteOptions{}, listOptions); err != nil && !apierrors.IsNotFound(err) {
+ return err
+ }
+
+ if err := k8sClient.Kubernetes().ExtensionsV1beta1().Ingresses(namespace).DeleteCollection(
+ ctx, metav1.DeleteOptions{}, listOptions); err != nil && !apierrors.IsNotFound(err) {
+ return err
+ }
+
+ if err := k8sClient.Kubernetes().CoreV1().Secrets(namespace).DeleteCollection(
+ ctx, metav1.DeleteOptions{}, listOptions); err != nil && !apierrors.IsNotFound(err) {
+ return err
+ }
+
+ if err := k8sClient.Kubernetes().CoreV1().Services(namespace).Delete(
+ ctx, fmt.Sprintf("grafana-%s", role), metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+// GetDomainInfoFromAnnotations returns the provider, domain, and zone include/exclude lists that are specified in the given annotations.
+func GetDomainInfoFromAnnotations(annotations map[string]string) (provider string, domain string, includeZones, excludeZones []string, err error) {
+ if annotations == nil {
+ return "", "", nil, nil, fmt.Errorf("domain secret has no annotations")
+ }
+
+ if providerAnnotation, ok := annotations[DNSProvider]; ok {
+ provider = providerAnnotation
+ }
+
+ if domainAnnotation, ok := annotations[DNSDomain]; ok {
+ domain = domainAnnotation
+ }
+
+ if includeZonesAnnotation, ok := annotations[DNSIncludeZones]; ok {
+ includeZones = strings.Split(includeZonesAnnotation, ",")
+ }
+ if excludeZonesAnnotation, ok := annotations[DNSExcludeZones]; ok {
+ excludeZones = strings.Split(excludeZonesAnnotation, ",")
+ }
+
+ if len(domain) == 0 {
+ return "", "", nil, nil, fmt.Errorf("missing dns domain annotation on domain secret")
+ }
+ if len(provider) == 0 {
+ return "", "", nil, nil, fmt.Errorf("missing dns provider annotation on domain secret")
+ }
+
+ return
+}
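+
+// For illustration only, a sketch with hypothetical annotation values:
+//
+//	provider, domain, include, _, err := GetDomainInfoFromAnnotations(map[string]string{
+//		DNSProvider:     "aws-route53",
+//		DNSDomain:       "example.com",
+//		DNSIncludeZones: "zone-a,zone-b",
+//	})
+//	// provider == "aws-route53", domain == "example.com", include == []string{"zone-a", "zone-b"}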
+
+// CurrentReplicaCount returns the current replicaCount for the given deployment.
+func CurrentReplicaCount(ctx context.Context, client client.Client, namespace, deploymentName string) (int32, error) {
+ deployment := &appsv1.Deployment{}
+ if err := client.Get(ctx, kutil.Key(namespace, deploymentName), deployment); err != nil && !apierrors.IsNotFound(err) {
+ return 0, err
+ }
+ if deployment.Spec.Replicas == nil {
+ return 0, nil
+ }
+ return *deployment.Spec.Replicas, nil
+}
+
+// RespectShootSyncPeriodOverwrite checks whether to respect the sync period overwrite of a Shoot or not.
+func RespectShootSyncPeriodOverwrite(respectSyncPeriodOverwrite bool, shoot *gardencorev1beta1.Shoot) bool {
+ return respectSyncPeriodOverwrite || shoot.Namespace == v1beta1constants.GardenNamespace
+}
+
+// ShouldIgnoreShoot determines whether a Shoot should be ignored or not.
+func ShouldIgnoreShoot(respectSyncPeriodOverwrite bool, shoot *gardencorev1beta1.Shoot) bool {
+ if !RespectShootSyncPeriodOverwrite(respectSyncPeriodOverwrite, shoot) {
+ return false
+ }
+
+ value, ok := shoot.Annotations[ShootIgnore]
+ if !ok {
+ return false
+ }
+
+ ignore, _ := strconv.ParseBool(value)
+ return ignore
+}
+
+// IsShootFailed checks if a Shoot is failed.
+func IsShootFailed(shoot *gardencorev1beta1.Shoot) bool {
+ lastOperation := shoot.Status.LastOperation
+
+ return lastOperation != nil && lastOperation.State == gardencorev1beta1.LastOperationStateFailed &&
+ shoot.Generation == shoot.Status.ObservedGeneration &&
+ shoot.Status.Gardener.Version == version.Get().GitVersion
+}
+
+// IsNowInEffectiveShootMaintenanceTimeWindow checks if the current time is in the effective
+// maintenance time window of the Shoot.
+func IsNowInEffectiveShootMaintenanceTimeWindow(shoot *gardencorev1beta1.Shoot) bool {
+ return EffectiveShootMaintenanceTimeWindow(shoot).Contains(time.Now())
+}
+
+// LastReconciliationDuringThisTimeWindow returns true if the shoot's last reconciliation time is contained in the effective maintenance time
+// window of the shoot and if the last reconciliation did not happen longer ago than the longest possible duration of a
+// maintenance time window.
+func LastReconciliationDuringThisTimeWindow(shoot *gardencorev1beta1.Shoot) bool {
+ if shoot.Status.LastOperation == nil {
+ return false
+ }
+
+ var (
+ timeWindow = EffectiveShootMaintenanceTimeWindow(shoot)
+ now = time.Now()
+ lastReconciliation = shoot.Status.LastOperation.LastUpdateTime.Time
+ )
+
+ return timeWindow.Contains(lastReconciliation) && now.UTC().Sub(lastReconciliation.UTC()) <= gardencorev1beta1.MaintenanceTimeWindowDurationMaximum
+}
+
+// IsObservedAtLatestGenerationAndSucceeded checks whether the Shoot's latest generation has been observed and the LastOperation state
+// is Succeeded.
+func IsObservedAtLatestGenerationAndSucceeded(shoot *gardencorev1beta1.Shoot) bool {
+ lastOperation := shoot.Status.LastOperation
+ return shoot.Generation == shoot.Status.ObservedGeneration &&
+ (lastOperation != nil && lastOperation.State == gardencorev1beta1.LastOperationStateSucceeded)
+}
+
+// SyncPeriodOfShoot determines the sync period of the given shoot.
+//
+// If no overwrite is allowed, the defaultMinSyncPeriod is returned.
+// Otherwise, the overwrite is parsed. If an error occurs or it is smaller than the defaultMinSyncPeriod,
+// the defaultMinSyncPeriod is returned. Otherwise, the overwrite is returned.
+func SyncPeriodOfShoot(respectSyncPeriodOverwrite bool, defaultMinSyncPeriod time.Duration, shoot *gardencorev1beta1.Shoot) time.Duration {
+ if !RespectShootSyncPeriodOverwrite(respectSyncPeriodOverwrite, shoot) {
+ return defaultMinSyncPeriod
+ }
+
+ syncPeriodOverwrite, ok := shoot.Annotations[ShootSyncPeriod]
+ if !ok {
+ return defaultMinSyncPeriod
+ }
+
+ syncPeriod, err := time.ParseDuration(syncPeriodOverwrite)
+ if err != nil {
+ return defaultMinSyncPeriod
+ }
+
+ if syncPeriod < defaultMinSyncPeriod {
+ return defaultMinSyncPeriod
+ }
+ return syncPeriod
+}
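+
+// For illustration only, a sketch assuming the overwrite is respected and the Shoot carries a hypothetical annotation:
+//
+//	// shoot.Annotations[ShootSyncPeriod] == "2h"
+//	period := SyncPeriodOfShoot(true, time.Hour, shoot) // 2h, since it is not below the minimum of 1h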
+
+// EffectiveMaintenanceTimeWindow shortens a maintenance time window by 15 minutes at its end. The 15 minutes are subtracted from the end
+// of the maintenance time window as a best-effort attempt at finishing the operation before the window closes.
+// Generally, we can't make sure that the maintenance operation is done by the end of the time window anyway (considering large
+// clusters with hundreds of nodes, a rolling update will take several hours).
+func EffectiveMaintenanceTimeWindow(timeWindow *utils.MaintenanceTimeWindow) *utils.MaintenanceTimeWindow {
+ return timeWindow.WithEnd(timeWindow.End().Add(0, -15, 0))
+}
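+
+// For illustration only, a sketch with a hypothetical window:
+//
+//	tw, _ := utils.ParseMaintenanceTimeWindow("220000+0000", "230000+0000")
+//	effective := EffectiveMaintenanceTimeWindow(tw) // ends at 224500+0000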
+
+// EffectiveShootMaintenanceTimeWindow returns the effective MaintenanceTimeWindow of the given Shoot.
+func EffectiveShootMaintenanceTimeWindow(shoot *gardencorev1beta1.Shoot) *utils.MaintenanceTimeWindow {
+ maintenance := shoot.Spec.Maintenance
+ if maintenance == nil || maintenance.TimeWindow == nil {
+ return utils.AlwaysTimeWindow
+ }
+
+ timeWindow, err := utils.ParseMaintenanceTimeWindow(maintenance.TimeWindow.Begin, maintenance.TimeWindow.End)
+ if err != nil {
+ return utils.AlwaysTimeWindow
+ }
+
+ return EffectiveMaintenanceTimeWindow(timeWindow)
+}
+
+// GardenEtcdEncryptionSecretName returns the name of the 'backup' of the etcd encryption secret in the Garden cluster.
+func GardenEtcdEncryptionSecretName(shootName string) string {
+ return fmt.Sprintf("%s.%s", shootName, EtcdEncryptionSecretName)
+}
+
+// ReadServiceAccountSigningKeySecret reads the signing key secret to extract the signing key.
+// It errors if there is no value at ServiceAccountSigningKeySecretDataKey.
+func ReadServiceAccountSigningKeySecret(secret *corev1.Secret) (string, error) {
+ data, ok := secret.Data[ServiceAccountSigningKeySecretDataKey]
+ if !ok {
+ return "", fmt.Errorf("no signing key secret in secret %s/%s at .Data.%s", secret.Namespace, secret.Name, ServiceAccountSigningKeySecretDataKey)
+ }
+
+ return string(data), nil
+}
+
+// GetServiceAccountSigningKeySecret gets the signing key from the secret with the given name and namespace.
+func GetServiceAccountSigningKeySecret(ctx context.Context, c client.Client, shootNamespace, secretName string) (string, error) {
+ secret := &corev1.Secret{}
+ if err := c.Get(ctx, kutil.Key(shootNamespace, secretName), secret); err != nil {
+ return "", err
+ }
+
+ return ReadServiceAccountSigningKeySecret(secret)
+}
+
+// GetAPIServerDomain returns the fully qualified domain name of the api-server for the Shoot cluster. The
+// end result is the given domain prefixed with 'api.'.
+func GetAPIServerDomain(domain string) string {
+ return fmt.Sprintf("%s.%s", APIServerPrefix, domain)
+}
+
+// GetSecretFromSecretRef gets the Secret object referenced by the given secretRef.
+func GetSecretFromSecretRef(ctx context.Context, c client.Client, secretRef *corev1.SecretReference) (*corev1.Secret, error) {
+ secret := &corev1.Secret{}
+ if err := c.Get(ctx, kutil.Key(secretRef.Namespace, secretRef.Name), secret); err != nil {
+ return nil, err
+ }
+ return secret, nil
+}
+
+// CheckIfDeletionIsConfirmed returns whether the deletion of an object is confirmed or not.
+func CheckIfDeletionIsConfirmed(obj metav1.Object) error {
+ annotations := obj.GetAnnotations()
+ if annotations == nil {
+ return annotationRequiredError()
+ }
+
+ value := annotations[ConfirmationDeletion]
+ if confirmed, err := strconv.ParseBool(value); err != nil || !confirmed {
+ return annotationRequiredError()
+ }
+ return nil
+}
+
+func annotationRequiredError() error {
+ return fmt.Errorf("must have a %q annotation to delete", ConfirmationDeletion)
+}
+
+// ConfirmDeletion adds Gardener's deletion confirmation annotation to the given object and sends an UPDATE request.
+func ConfirmDeletion(ctx context.Context, c client.Client, obj client.Object) error {
+ return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+ if err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
+ if !apierrors.IsNotFound(err) {
+ return err
+ }
+ return nil
+ }
+
+ existing := obj.DeepCopyObject()
+
+ acc, err := meta.Accessor(obj)
+ if err != nil {
+ return err
+ }
+ kutil.SetMetaDataAnnotation(acc, ConfirmationDeletion, "true")
+ kutil.SetMetaDataAnnotation(acc, v1beta1constants.GardenerTimestamp, TimeNow().UTC().String())
+
+ if reflect.DeepEqual(existing, obj) {
+ return nil
+ }
+
+ return c.Update(ctx, obj)
+ })
+}
+
+// ExtensionID returns an identifier for the given extension kind/type.
+func ExtensionID(extensionKind, extensionType string) string {
+ return fmt.Sprintf("%s/%s", extensionKind, extensionType)
+}
+
+// DeleteDeploymentsHavingDeprecatedRoleLabelKey deletes the Deployments with the passed object keys if
+// the corresponding Deployment .spec.selector contains the deprecated "garden.sapcloud.io/role" label key.
+func DeleteDeploymentsHavingDeprecatedRoleLabelKey(ctx context.Context, c client.Client, keys []client.ObjectKey) error {
+ for _, key := range keys {
+ deployment := &appsv1.Deployment{}
+ if err := c.Get(ctx, key, deployment); err != nil {
+ if apierrors.IsNotFound(err) {
+ continue
+ }
+
+ return err
+ }
+
+ if _, ok := deployment.Spec.Selector.MatchLabels[v1beta1constants.DeprecatedGardenRole]; ok {
+ if err := c.Delete(ctx, deployment); client.IgnoreNotFound(err) != nil {
+ return err
+ }
+
+ if err := kutil.WaitUntilResourceDeleted(ctx, c, deployment, 2*time.Second); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/chart/chart.go b/vendor/github.com/gardener/gardener/pkg/utils/chart/chart.go
new file mode 100644
index 0000000..f8f5ad8
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/chart/chart.go
@@ -0,0 +1,203 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chart
+
+import (
+ "context"
+
+ "github.com/gardener/gardener/pkg/chartrenderer"
+ gardenerkubernetes "github.com/gardener/gardener/pkg/client/kubernetes"
+ "github.com/gardener/gardener/pkg/utils"
+ "github.com/gardener/gardener/pkg/utils/imagevector"
+
+ "github.com/pkg/errors"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Interface represents a Helm chart that can be applied and deleted.
+type Interface interface {
+ // Apply applies this chart in the given namespace using the given ChartApplier. Before applying the chart,
+ // it collects its values, injecting images and merging the given values as needed.
+ Apply(context.Context, gardenerkubernetes.ChartApplier, string, imagevector.ImageVector, string, string, map[string]interface{}) error
+ // Render renders this chart in the given namespace using the given chartRenderer. Before rendering the chart,
+ // it collects its values, injecting images and merging the given values as needed.
+ Render(chartrenderer.Interface, string, imagevector.ImageVector, string, string, map[string]interface{}) (string, []byte, error)
+ // Delete deletes this chart's objects from the given namespace.
+ Delete(context.Context, client.Client, string) error
+}
+
+// Chart represents a Helm chart (and its sub-charts) that can be applied and deleted.
+type Chart struct {
+ Name string
+ Path string
+ Images []string
+ Objects []*Object
+ SubCharts []*Chart
+}
+
+// Object represents an object deployed by a Chart.
+type Object struct {
+ Type client.Object
+ Name string
+}
+
+// Apply applies this chart in the given namespace using the given ChartApplier. Before applying the chart,
+// it collects its values, injecting images and merging the given values as needed.
+func (c *Chart) Apply(
+ ctx context.Context,
+ chartApplier gardenerkubernetes.ChartApplier,
+ namespace string,
+ imageVector imagevector.ImageVector,
+ runtimeVersion, targetVersion string,
+ additionalValues map[string]interface{},
+) error {
+
+ // Get values with injected images
+ values, err := c.injectImages(imageVector, runtimeVersion, targetVersion)
+ if err != nil {
+ return err
+ }
+
+ // Apply chart
+ err = chartApplier.Apply(ctx, c.Path, namespace, c.Name, gardenerkubernetes.Values(utils.MergeMaps(values, additionalValues)))
+ if err != nil {
+ return errors.Wrapf(err, "could not apply chart '%s' in namespace '%s'", c.Name, namespace)
+ }
+ return nil
+}
+
+// Render renders this chart in the given namespace using the given chartRenderer. Before rendering the chart,
+// it collects its values, injecting images and merging the given values as needed.
+func (c *Chart) Render(
+ chartRenderer chartrenderer.Interface,
+ namespace string,
+ imageVector imagevector.ImageVector,
+ runtimeVersion, targetVersion string,
+ additionalValues map[string]interface{},
+) (string, []byte, error) {
+
+ // Get values with injected images
+ values, err := c.injectImages(imageVector, runtimeVersion, targetVersion)
+ if err != nil {
+ return "", nil, err
+ }
+
+	// Render chart
+ rc, err := chartRenderer.Render(c.Path, c.Name, namespace, utils.MergeMaps(values, additionalValues))
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "could not render chart '%s' in namespace '%s'", c.Name, namespace)
+ }
+ return rc.ChartName, rc.Manifest(), nil
+}
+
+// injectImages collects and returns a values map with injected images, including those of sub-charts.
+func (c *Chart) injectImages(
+ imageVector imagevector.ImageVector,
+ runtimeVersion, targetVersion string,
+) (map[string]interface{}, error) {
+
+ // Inject images
+ values := make(map[string]interface{})
+ var err error
+ if len(c.Images) > 0 {
+ values, err = InjectImages(values, imageVector, c.Images, imagevector.RuntimeVersion(runtimeVersion), imagevector.TargetVersion(targetVersion))
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not inject chart '%s' images", c.Name)
+ }
+ }
+
+ // Add subchart values
+ for _, sc := range c.SubCharts {
+ scValues, err := sc.injectImages(imageVector, runtimeVersion, targetVersion)
+ if err != nil {
+ return nil, err
+ }
+ values[sc.Name] = scValues
+ }
+
+ return values, nil
+}
+
+// Delete deletes this chart's objects from the given namespace using the given client.
+func (c *Chart) Delete(ctx context.Context, client client.Client, namespace string) error {
+ // Delete objects
+ for _, o := range c.Objects {
+ if err := o.Delete(ctx, client, namespace); err != nil {
+ return errors.Wrap(err, "could not delete chart object")
+ }
+ }
+
+ // Delete subchart objects
+ for _, sc := range c.SubCharts {
+ if err := sc.Delete(ctx, client, namespace); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Delete deletes this object from the given namespace using the given client.
+func (o *Object) Delete(ctx context.Context, c client.Client, namespace string) error {
+ obj := o.Type.DeepCopyObject().(client.Object)
+ kind := obj.GetObjectKind().GroupVersionKind().Kind
+ key := objectKey(namespace, o.Name)
+ if err := c.Get(ctx, key, obj); err != nil {
+ if apierrors.IsNotFound(err) {
+ return nil
+ }
+ return errors.Wrapf(err, "could not get %s '%s'", kind, key.String())
+ }
+ if err := c.Delete(ctx, obj); err != nil {
+ return errors.Wrapf(err, "could not delete %s '%s'", kind, key.String())
+ }
+ return nil
+}
+
+func objectKey(namespace, name string) client.ObjectKey {
+ return client.ObjectKey{Namespace: namespace, Name: name}
+}
+
+// CopyValues creates a shallow copy of the given Values.
+func CopyValues(values map[string]interface{}) map[string]interface{} {
+ copiedValues := make(map[string]interface{}, len(values))
+ for k, v := range values {
+ copiedValues[k] = v
+ }
+ return copiedValues
+}
+
+// ImageMapToValues transforms the given image name to image mapping into chart Values.
+func ImageMapToValues(m map[string]*imagevector.Image) map[string]interface{} {
+ out := make(map[string]interface{}, len(m))
+ for k, v := range m {
+ out[k] = v.String()
+ }
+ return out
+}
+
+// InjectImages finds the images with the given names and opts, makes a shallow copy of the given
+// Values and injects a name to image string mapping at the `images` key of that map and returns it.
+func InjectImages(values map[string]interface{}, v imagevector.ImageVector, names []string, opts ...imagevector.FindOptionFunc) (map[string]interface{}, error) {
+ images, err := imagevector.FindImages(v, names, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ values = CopyValues(values)
+ values["images"] = ImageMapToValues(images)
+ return values, nil
+}
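+
+// For illustration only, a rough usage sketch (the chart name, path, images, and versions are hypothetical):
+//
+//	var coreDNSChart = &Chart{
+//		Name:    "coredns",
+//		Path:    "charts/shoot-core/coredns",
+//		Images:  []string{"coredns"},
+//		Objects: []*Object{{Type: &appsv1.Deployment{}, Name: "coredns"}},
+//	}
+//	err := coreDNSChart.Apply(ctx, chartApplier, "kube-system", imageVector, "1.18.0", "1.18.0", nil)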
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/checksums.go b/vendor/github.com/gardener/gardener/pkg/utils/checksums.go
new file mode 100644
index 0000000..c53e83b
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/checksums.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "encoding/json"
+ "sort"
+)
+
+// ComputeSecretCheckSum computes the sha256 checksum of secret data.
+func ComputeSecretCheckSum(data map[string][]byte) string {
+ var (
+ hash string
+ keys []string
+ )
+
+ for k := range data {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ hash += ComputeSHA256Hex(data[k])
+ }
+
+ return ComputeSHA256Hex([]byte(hash))
+}
+
+// ComputeChecksum computes a SHA256 checksum for the given data.
+func ComputeChecksum(data interface{}) string {
+ jsonString, err := json.Marshal(data)
+ if err != nil {
+ return ""
+ }
+ return ComputeSHA256Hex(jsonString)
+}
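+
+// For illustration only, a sketch of how these checksums are typically consumed (the annotation key is hypothetical):
+//
+//	checksum := ComputeSecretCheckSum(secret.Data)
+//	podTemplateAnnotations["checksum/secret-"+secret.Name] = checksum // trigger a rollout when the secret changes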
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/context/context.go b/vendor/github.com/gardener/gardener/pkg/utils/context/context.go
new file mode 100644
index 0000000..0d339bd
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/context/context.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package context
+
+import (
+ "context"
+ "time"
+)
+
+// FromStopChannel creates a new context from a given stop channel.
+func FromStopChannel(stopCh <-chan struct{}) context.Context {
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ defer cancel()
+ <-stopCh
+ }()
+
+ return ctx
+}
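+
+// For illustration only, a rough usage sketch:
+//
+//	stopCh := make(chan struct{})
+//	ctx := FromStopChannel(stopCh)
+//	close(stopCh) // ctx.Done() is closed shortly afterwards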
+
+type ops struct{}
+
+// WithTimeout returns the context with the given timeout and a CancelFunc to cleanup its resources.
+func (ops) WithTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
+ return context.WithTimeout(ctx, timeout)
+}
+
+// DefaultOps returns the default Ops implementation.
+func DefaultOps() Ops {
+ return ops{}
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/context/types.go b/vendor/github.com/gardener/gardener/pkg/utils/context/types.go
new file mode 100644
index 0000000..ad7ddf0
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/context/types.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package context
+
+import (
+ "context"
+ "time"
+)
+
+// Ops are operations to do with a context. They mimic the functions from the context package.
+type Ops interface {
+ // WithTimeout returns a new context with the given timeout that can be canceled with the returned function.
+ WithTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc)
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/encoding.go b/vendor/github.com/gardener/gardener/pkg/utils/encoding.go
new file mode 100644
index 0000000..13cf9d9
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/encoding.go
@@ -0,0 +1,184 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "sort"
+ "strconv"
+)
+
+// EncodeBase64 takes a byte slice and returns the Base64-encoded string.
+func EncodeBase64(in []byte) string {
+ encodedLength := base64.StdEncoding.EncodedLen(len(in))
+ buffer := make([]byte, encodedLength)
+ out := buffer[0:encodedLength]
+ base64.StdEncoding.Encode(out, in)
+ return string(out)
+}
+
+// DecodeBase64 takes a Base64-encoded string and returns the decoded byte slice.
+func DecodeBase64(in string) ([]byte, error) {
+ return base64.StdEncoding.DecodeString(in)
+}
+
+// EncodePrivateKey takes a RSA private key object, encodes it to the PEM format, and returns it as
+// a byte slice.
+func EncodePrivateKey(key *rsa.PrivateKey) []byte {
+ return pem.EncodeToMemory(&pem.Block{
+ Type: "RSA PRIVATE KEY",
+ Bytes: x509.MarshalPKCS1PrivateKey(key),
+ })
+}
+
+// EncodePrivateKeyInPKCS8 takes a RSA private key object, encodes it to the PKCS8 format, and returns it as
+// a byte slice.
+func EncodePrivateKeyInPKCS8(key *rsa.PrivateKey) ([]byte, error) {
+ bytes, err := x509.MarshalPKCS8PrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ return pem.EncodeToMemory(&pem.Block{
+ Type: "RSA PRIVATE KEY",
+ Bytes: bytes,
+ }), nil
+}
+
+// DecodeRSAPrivateKeyFromPKCS8 takes a byte slice, decodes it from the PKCS8 format, tries to convert it
+// to an rsa.PrivateKey object, and returns it. In case an error occurs, it returns the error.
+func DecodeRSAPrivateKeyFromPKCS8(bytes []byte) (*rsa.PrivateKey, error) {
+ block, _ := pem.Decode(bytes)
+ if block == nil || block.Type != "RSA PRIVATE KEY" {
+ return nil, errors.New("could not decode the PEM-encoded RSA private key")
+ }
+ key, err := x509.ParsePKCS8PrivateKey(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ rsaKey, ok := key.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("the decoded key is not an RSA private key")
+ }
+ return rsaKey, nil
+}
+
+// DecodePrivateKey takes a byte slice, decodes it from the PEM format, converts it to an rsa.PrivateKey
+// object, and returns it. In case an error occurs, it returns the error.
+func DecodePrivateKey(bytes []byte) (*rsa.PrivateKey, error) {
+ block, _ := pem.Decode(bytes)
+ if block == nil || block.Type != "RSA PRIVATE KEY" {
+ return nil, errors.New("could not decode the PEM-encoded RSA private key")
+ }
+ return x509.ParsePKCS1PrivateKey(block.Bytes)
+}
+
+// EncodeCertificate takes a certificate as a byte slice, encodes it to the PEM format, and returns
+// it as byte slice.
+func EncodeCertificate(certificate []byte) []byte {
+ return pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: certificate,
+ })
+}
+
+// DecodeCertificate takes a byte slice, decodes it from the PEM format, converts it to an x509.Certificate
+// object, and returns it. In case an error occurs, it returns the error.
+func DecodeCertificate(bytes []byte) (*x509.Certificate, error) {
+ block, _ := pem.Decode(bytes)
+ if block == nil || block.Type != "CERTIFICATE" {
+ return nil, errors.New("could not decode the PEM-encoded certificate")
+ }
+ return x509.ParseCertificate(block.Bytes)
+}
+
+// SHA1 takes a byte slice and returns the sha1-hashed byte slice.
+func SHA1(in []byte) []byte {
+ s := sha1.New()
+ _, _ = s.Write(in)
+ return s.Sum(nil)
+}
+
+// SHA256 takes a byte slice and returns the sha256-hashed byte slice.
+func SHA256(in []byte) []byte {
+ h := sha256.Sum256(in)
+ return h[:]
+}
+
+// EncodeSHA1 takes a byte slice and returns the sha1-hashed string (base64-encoded).
+func EncodeSHA1(in []byte) string {
+ return EncodeBase64(SHA1(in))
+}
+
+// CreateSHA1Secret takes a username and a password and returns a sha1-schemed credentials pair as string.
+func CreateSHA1Secret(username, password []byte) string {
+ credentials := append([]byte(username), ":{SHA}"...)
+ credentials = append(credentials, EncodeSHA1(password)...)
+ return EncodeBase64(credentials)
+}
+
+// ComputeSHA1Hex computes the hexadecimal representation of the SHA1 hash of the given input byte
+// slice, converts it to a string and returns it (the returned string is 40 characters long).
+func ComputeSHA1Hex(in []byte) string {
+ return hex.EncodeToString(SHA1(in))
+}
+
+// ComputeSHA256Hex computes the hexadecimal representation of the SHA256 hash of the given input byte
+// slice, converts it to a string and returns it.
+func ComputeSHA256Hex(in []byte) string {
+ return hex.EncodeToString(SHA256(in))
+}
+
+// HashForMap creates a hash value for a map of type map[string]interface{} and returns it.
+func HashForMap(m map[string]interface{}) string {
+ var (
+ hash string
+ keys []string
+ )
+
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ switch v := m[k].(type) {
+ case string:
+ hash += ComputeSHA256Hex([]byte(v))
+ case int:
+ hash += ComputeSHA256Hex([]byte(strconv.Itoa(v)))
+ case bool:
+ hash += ComputeSHA256Hex([]byte(strconv.FormatBool(v)))
+ case []string:
+ for _, val := range v {
+ hash += ComputeSHA256Hex([]byte(val))
+ }
+ case map[string]interface{}:
+ hash += HashForMap(v)
+ case []map[string]interface{}:
+ for _, val := range v {
+ hash += HashForMap(val)
+ }
+ }
+ }
+
+ return ComputeSHA256Hex([]byte(hash))
+}
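
A short usage sketch for the hashing helpers above (illustrative only, not part of the vendored file); the credential values and map contents are placeholders.

```go
package main

import (
	"fmt"

	"github.com/gardener/gardener/pkg/utils"
)

func main() {
	// htpasswd-style "{SHA}" credentials pair, base64-encoded as a whole.
	secret := utils.CreateSHA1Secret([]byte("admin"), []byte("s3cr3t"))
	fmt.Println(secret)

	// HashForMap sorts the keys before hashing, so two maps with the same
	// content always yield the same 64-character hex digest.
	digest := utils.HashForMap(map[string]interface{}{
		"replicas": 3,
		"enabled":  true,
		"zones":    []string{"a", "b"},
	})
	fmt.Println(digest)
}
```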
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/errors/errors.go b/vendor/github.com/gardener/gardener/pkg/utils/errors/errors.go
new file mode 100644
index 0000000..b4003eb
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/errors/errors.go
@@ -0,0 +1,262 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/hashicorp/go-multierror"
+)
+
+type withSuppressed struct {
+ cause error
+ suppressed error
+}
+
+func (w *withSuppressed) Error() string {
+ return fmt.Sprintf("%s, suppressed: %s", w.cause.Error(), w.suppressed.Error())
+}
+
+func (w *withSuppressed) Cause() error {
+ return w.cause
+}
+
+func (w *withSuppressed) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ _, _ = fmt.Fprintf(s, "%+v\nsuppressed: %+v", w.Cause(), w.suppressed)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ _, _ = io.WriteString(s, w.Error())
+ }
+}
+
+func (w *withSuppressed) Suppressed() error {
+ return w.suppressed
+}
+
+// Suppressed retrieves the suppressed error of the given error, if any.
+// An error has a suppressed error if it implements the following interface:
+//
+// type suppressor interface {
+// Suppressed() error
+// }
+// If the error does not implement the interface, nil is returned.
+func Suppressed(err error) error {
+ type suppressor interface {
+ Suppressed() error
+ }
+ if w, ok := err.(suppressor); ok {
+ return w.Suppressed()
+ }
+ return nil
+}
+
+// WithSuppressed annotates err with a suppressed error.
+// If err is nil, WithSuppressed returns nil.
+// If suppressed is nil, WithSuppressed returns err.
+func WithSuppressed(err, suppressed error) error {
+ if err == nil || suppressed == nil {
+ return err
+ }
+
+ return &withSuppressed{
+ cause: err,
+ suppressed: suppressed,
+ }
+}
+
+// reconciliationError implements ErrorIDer and Causer
+type reconciliationError struct {
+ error
+ errorID string
+}
+
+// WithID annotates the error with the given errorID which can afterwards be retrieved by ErrorID()
+func WithID(id string, err error) error {
+ return &reconciliationError{err, id}
+}
+
+// ErrorID implements the errorIDer interface and returns the id of the reconciliationError
+func (t *reconciliationError) ErrorID() string {
+ return t.errorID
+}
+
+// Cause implements the causer interface and returns the underlying error
+func (t *reconciliationError) Cause() error {
+ return t.error
+}
+
+// GetID returns the ID of the error if possible.
+// If err does not implement ErrorID or is nil an empty string will be returned.
+func GetID(err error) string {
+ type errorIDer interface {
+ ErrorID() string
+ }
+
+ var id string
+ if err != nil {
+ if errWithID, ok := err.(errorIDer); ok {
+ id = errWithID.ErrorID()
+ }
+ }
+ return id
+}
+
+// The ErrorContext holds the lastError IDs from the previous reconciliation and the IDs of the errors that are processed in this context during the current reconciliation
+type ErrorContext struct {
+ name string
+ lastErrorIDs []string
+ errorIDs map[string]struct{}
+}
+
+// NewErrorContext creates a new error context with the given name and lastErrors from the previous reconciliation
+func NewErrorContext(name string, lastErrorIDs []string) *ErrorContext {
+ return &ErrorContext{
+ name: name,
+ lastErrorIDs: lastErrorIDs,
+ errorIDs: map[string]struct{}{},
+ }
+}
+
+// AddErrorID adds an error ID which will be tracked by the context and panics if more than one error has the same ID
+func (e *ErrorContext) AddErrorID(errorID string) {
+ if e.HasErrorWithID(errorID) {
+ panic(fmt.Sprintf("Error with id %q already exists in error context %q", errorID, e.name))
+ }
+ e.errorIDs[errorID] = struct{}{}
+}
+
+// HasErrorWithID checks if the ErrorContext already contains an error with id errorID
+func (e *ErrorContext) HasErrorWithID(errorID string) bool {
+ _, ok := e.errorIDs[errorID]
+ return ok
+}
+
+// HasLastErrorWithID checks if the previous reconciliation had encountered an error with id errorID
+func (e *ErrorContext) HasLastErrorWithID(errorID string) bool {
+ for _, lastErrorID := range e.lastErrorIDs {
+ if errorID == lastErrorID {
+ return true
+ }
+ }
+ return false
+}
+
+type cancelError struct{}
+
+func (*cancelError) Error() string {
+ return "Canceled"
+}
+
+// Cancel returns an error which will cause the HandleErrors function to stop executing tasks without triggering its FailureHandler.
+func Cancel() error {
+ return &cancelError{}
+}
+
+// WasCanceled checks to see if the HandleErrors function was canceled manually. It can be used to check if execution after HandleErrors should be stopped without returning an error
+func WasCanceled(err error) bool {
+ _, ok := err.(*cancelError)
+ return ok
+}
+
+// FailureHandler is a function which is called when an error occurs
+type FailureHandler func(string, error) error
+
+// SuccessHandler is called when a task completes successfully
+type SuccessHandler func(string) error
+
+// TaskFunc is an interface for a task which should belong to an ErrorContext and can trigger OnSuccess and OnFailure callbacks depending on whether it completes successfully or not
+type TaskFunc interface {
+ Do(errorContext *ErrorContext) (string, error)
+}
+
+// taskFunc implements TaskFunc
+type taskFunc func(*ErrorContext) (string, error)
+
+func (f taskFunc) Do(errorContext *ErrorContext) (string, error) {
+ return f(errorContext)
+}
+
+func defaultFailureHandler(errorID string, err error) error {
+ err = fmt.Errorf("%s failed (%v)", errorID, err)
+ return WithID(errorID, err)
+}
+
+// ToExecute takes an errorID and a function and creates a TaskFunc from them.
+func ToExecute(errorID string, task func() error) TaskFunc {
+ return taskFunc(func(errorContext *ErrorContext) (string, error) {
+ errorContext.AddErrorID(errorID)
+ err := task()
+ if err != nil {
+ return errorID, err
+ }
+ return errorID, nil
+ })
+}
+
+// HandleErrors takes a reference to an ErrorContext, onSuccess and onFailure callback functions and a variadic list of taskFuncs.
+// It sequentially adds the Tasks' errorIDs to the provided ErrorContext and executes them.
+// If the ErrorContext has errors from the previous reconciliation and the tasks which caused them now complete successfully, OnSuccess is called.
+// If a task fails, OnFailure is called.
+func HandleErrors(errorContext *ErrorContext, onSuccess SuccessHandler, onFailure FailureHandler, tasks ...TaskFunc) error {
+ for _, task := range tasks {
+ errorID, err := task.Do(errorContext)
+ if err != nil && !WasCanceled(err) {
+ return handleFailure(onFailure, errorID, err)
+ }
+ if handlerErr := handleSuccess(errorContext, onSuccess, errorID); handlerErr != nil {
+ return handlerErr
+ }
+ if WasCanceled(err) {
+ return err
+ }
+ }
+ return nil
+}
+
+func handleFailure(onFailure FailureHandler, errorID string, err error) error {
+ if onFailure != nil {
+ return onFailure(errorID, err)
+ }
+ return defaultFailureHandler(errorID, err)
+}
+
+func handleSuccess(errorContext *ErrorContext, onSuccess SuccessHandler, errorID string) error {
+ if onSuccess != nil && errorContext.HasLastErrorWithID(errorID) {
+ if err := onSuccess(errorID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Errors returns a list of all nested errors of the given error.
+// If the error is nil, nil is returned.
+// If the error is a multierror, it returns all its errors.
+// Otherwise, it returns a slice containing the error as single element.
+func Errors(err error) []error {
+ if err == nil {
+ return nil
+ }
+ if errs, ok := err.(*multierror.Error); ok {
+ return errs.Errors
+ }
+ return []error{err}
+}
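
A sketch of how ErrorContext, ToExecute and HandleErrors fit together (illustrative only, not part of the vendored file); the task IDs are invented.

```go
package main

import (
	"errors"
	"fmt"

	utilerrors "github.com/gardener/gardener/pkg/utils/errors"
)

func main() {
	// "deploy" failed during the previous reconciliation.
	errCtx := utilerrors.NewErrorContext("example", []string{"deploy"})

	err := utilerrors.HandleErrors(errCtx,
		// onSuccess: only called for tasks that failed last time and now succeeded.
		func(id string) error { fmt.Println("recovered:", id); return nil },
		// onFailure: nil falls back to the default handler, which attaches the error ID.
		nil,
		utilerrors.ToExecute("deploy", func() error { return nil }),
		utilerrors.ToExecute("verify", func() error { return errors.New("boom") }),
	)

	fmt.Println(err)                   // verify failed (boom)
	fmt.Println(utilerrors.GetID(err)) // verify
}
```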
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/errors/multierror.go b/vendor/github.com/gardener/gardener/pkg/utils/errors/multierror.go
new file mode 100644
index 0000000..69e9455
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/errors/multierror.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-multierror"
+)
+
+// NewErrorFormatFuncWithPrefix creates a new multierror.ErrorFormatFunc which can be used as an ErrorFormat on
+// multierror.Error instances. The error string is prefixed with the given prefix, and all errors are concatenated at the end.
+// This is similar to multierror.ListFormatFunc but does not use any escape sequences, which will look weird in
+// the status of Kubernetes objects or controller logs.
+func NewErrorFormatFuncWithPrefix(prefix string) multierror.ErrorFormatFunc {
+ return func(es []error) string {
+ if len(es) == 1 {
+ return fmt.Sprintf("%s: 1 error occurred: %s", prefix, es[0])
+ }
+
+ combinedMsg := ""
+ for i, err := range es {
+ if i > 0 {
+ combinedMsg += ", "
+ }
+ combinedMsg += err.Error()
+ }
+
+ return fmt.Sprintf("%s: %d errors occurred: [%s]", prefix, len(es), combinedMsg)
+ }
+}
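
A brief sketch of the formatter in use (illustrative, not part of the vendored file); the prefix and error messages are placeholders.

```go
package main

import (
	"errors"
	"fmt"

	utilerrors "github.com/gardener/gardener/pkg/utils/errors"
	"github.com/hashicorp/go-multierror"
)

func main() {
	allErrs := &multierror.Error{
		ErrorFormat: utilerrors.NewErrorFormatFuncWithPrefix("reconciling example"),
	}
	allErrs = multierror.Append(allErrs, errors.New("node not ready"), errors.New("dns lookup failed"))

	// Single line, without newlines or bullet characters, so it reads well in a
	// Kubernetes status field or a controller log:
	// reconciling example: 2 errors occurred: [node not ready, dns lookup failed]
	fmt.Println(allErrs.Error())
}
```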
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/flow/flow.go b/vendor/github.com/gardener/gardener/pkg/utils/flow/flow.go
new file mode 100644
index 0000000..459f120
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/flow/flow.go
@@ -0,0 +1,387 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package flow provides utilities to construct a directed acyclic computational graph
+// that is then executed and monitored with maximum parallelism.
+package flow
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/gardener/gardener/pkg/logger"
+ utilerrors "github.com/gardener/gardener/pkg/utils/errors"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ logKeyFlow = "flow"
+ logKeyTask = "task"
+)
+
+// ErrorCleaner is called when a task that errored during the previous reconciliation completes successfully
+type ErrorCleaner func(context.Context, string)
+
+type nodes map[TaskID]*node
+
+func (ns nodes) rootIDs() TaskIDs {
+ roots := NewTaskIDs()
+ for taskID, node := range ns {
+ if node.required == 0 {
+ roots.Insert(taskID)
+ }
+ }
+ return roots
+}
+
+func (ns nodes) getOrCreate(id TaskID) *node {
+ n, ok := ns[id]
+ if !ok {
+ n = &node{}
+ ns[id] = n
+ }
+ return n
+}
+
+// Flow is a validated executable Graph.
+type Flow struct {
+ name string
+ nodes nodes
+}
+
+// Name retrieves the name of a flow.
+func (f *Flow) Name() string {
+ return f.name
+}
+
+// Len retrieves the number of tasks in a Flow.
+func (f *Flow) Len() int {
+ return len(f.nodes)
+}
+
+// node is a compiled Task that contains the triggered Tasks, the
+// number of triggers the node itself requires and its payload function.
+type node struct {
+ targetIDs TaskIDs
+ required int
+ fn TaskFn
+}
+
+func (n *node) String() string {
+ return fmt.Sprintf("node{targets=%s, required=%d}", n.targetIDs.List(), n.required)
+}
+
+// addTargets adds the given TaskIDs as targets to the node.
+func (n *node) addTargets(taskIDs ...TaskID) {
+ if n.targetIDs == nil {
+ n.targetIDs = NewTaskIDs(TaskIDSlice(taskIDs))
+ return
+ }
+ n.targetIDs.Insert(TaskIDSlice(taskIDs))
+}
+
+// Opts are options for a Flow execution. If they are not set, they
+// are left blank and don't affect the Flow.
+type Opts struct {
+ Logger logrus.FieldLogger
+ ProgressReporter ProgressReporter
+ ErrorCleaner func(ctx context.Context, taskID string)
+ ErrorContext *utilerrors.ErrorContext
+ Context context.Context
+}
+
+// Run starts an execution of a Flow.
+// It blocks until the Flow has finished and returns the error, if any.
+func (f *Flow) Run(opts Opts) error {
+ ctx := opts.Context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ return newExecution(f, opts.Logger, opts.ProgressReporter, opts.ErrorCleaner, opts.ErrorContext).run(ctx)
+}
+
+type nodeResult struct {
+ TaskID TaskID
+ Error error
+}
+
+// Stats are the statistics of a Flow execution.
+type Stats struct {
+ FlowName string
+ All TaskIDs
+ Succeeded TaskIDs
+ Failed TaskIDs
+ Running TaskIDs
+ Pending TaskIDs
+}
+
+// ProgressPercent retrieves the progress of a Flow execution in percent.
+func (s *Stats) ProgressPercent() int32 {
+ progress := (100 * s.Succeeded.Len()) / s.All.Len()
+ return int32(progress)
+}
+
+// Copy deeply copies a Stats object.
+func (s *Stats) Copy() *Stats {
+ return &Stats{
+ s.FlowName,
+ s.All.Copy(),
+ s.Succeeded.Copy(),
+ s.Failed.Copy(),
+ s.Running.Copy(),
+ s.Pending.Copy(),
+ }
+}
+
+// InitialStats creates a new Stats object with the given set of initial TaskIDs.
+// The initial TaskIDs are added to all TaskIDs as well as to the pending ones.
+func InitialStats(flowName string, all TaskIDs) *Stats {
+ return &Stats{
+ flowName,
+ all,
+ NewTaskIDs(),
+ NewTaskIDs(),
+ NewTaskIDs(),
+ all.Copy(),
+ }
+}
+
+func newExecution(flow *Flow, log logrus.FieldLogger, progressReporter ProgressReporter, errorCleaner ErrorCleaner, errorContext *utilerrors.ErrorContext) *execution {
+ all := NewTaskIDs()
+
+ for name := range flow.nodes {
+ all.Insert(name)
+ }
+
+ if log == nil {
+ log = logger.NewNopLogger()
+ }
+ log = log.WithField(logKeyFlow, flow.name)
+
+ return &execution{
+ flow,
+ InitialStats(flow.name, all),
+ nil,
+ log,
+ progressReporter,
+ errorCleaner,
+ errorContext,
+ make(chan *nodeResult),
+ make(map[TaskID]int),
+ }
+}
+
+type execution struct {
+ flow *Flow
+
+ stats *Stats
+ taskErrors []error
+
+ log logrus.FieldLogger
+ progressReporter ProgressReporter
+ errorCleaner ErrorCleaner
+ errorContext *utilerrors.ErrorContext
+
+ done chan *nodeResult
+ triggerCounts map[TaskID]int
+}
+
+func (e *execution) Log() logrus.FieldLogger {
+ return e.log
+}
+
+func (e *execution) runNode(ctx context.Context, id TaskID) {
+ if e.errorContext != nil {
+ e.errorContext.AddErrorID(string(id))
+ }
+ e.stats.Pending.Delete(id)
+ e.stats.Running.Insert(id)
+ go func() {
+ log := e.log.WithField(logKeyTask, id)
+
+ start := time.Now().UTC()
+ log.Debugf("Started")
+ err := e.flow.nodes[id].fn(ctx)
+ end := time.Now().UTC()
+ log.Debugf("Finished, took %s", end.Sub(start))
+
+ if err != nil {
+ log.WithError(err).Error("Error")
+ } else {
+ log.Info("Succeeded")
+ }
+
+ err = errors.Wrapf(err, "task %q failed", id)
+ e.done <- &nodeResult{TaskID: id, Error: err}
+ }()
+}
+
+func (e *execution) updateSuccess(id TaskID) {
+ e.stats.Running.Delete(id)
+ e.stats.Succeeded.Insert(id)
+}
+
+func (e *execution) updateFailure(id TaskID) {
+ e.stats.Running.Delete(id)
+ e.stats.Failed.Insert(id)
+}
+
+func (e *execution) processTriggers(ctx context.Context, id TaskID) {
+ node := e.flow.nodes[id]
+ for target := range node.targetIDs {
+ e.triggerCounts[target]++
+ if e.triggerCounts[target] == e.flow.nodes[target].required {
+ e.runNode(ctx, target)
+ }
+ }
+}
+
+func (e *execution) cleanErrors(ctx context.Context, taskID TaskID) {
+ if e.errorCleaner != nil {
+ e.errorCleaner(ctx, string(taskID))
+ }
+}
+
+func (e *execution) reportProgress(ctx context.Context) {
+ if e.progressReporter != nil {
+ e.progressReporter.Report(ctx, e.stats.Copy())
+ }
+}
+
+func (e *execution) run(ctx context.Context) error {
+ defer close(e.done)
+
+ if e.progressReporter != nil {
+ if err := e.progressReporter.Start(ctx); err != nil {
+ return err
+ }
+ defer e.progressReporter.Stop()
+ }
+
+ e.log.Info("Starting")
+ e.reportProgress(ctx)
+
+ var (
+ cancelErr error
+ roots = e.flow.nodes.rootIDs()
+ )
+ for name := range roots {
+ if cancelErr = ctx.Err(); cancelErr == nil {
+ e.runNode(ctx, name)
+ }
+ }
+ e.reportProgress(ctx)
+
+ for e.stats.Running.Len() > 0 {
+ result := <-e.done
+ if result.Error != nil {
+ e.taskErrors = append(e.taskErrors, utilerrors.WithID(string(result.TaskID), result.Error))
+ e.updateFailure(result.TaskID)
+ } else {
+ e.updateSuccess(result.TaskID)
+ if e.errorContext != nil && e.errorContext.HasLastErrorWithID(string(result.TaskID)) {
+ e.cleanErrors(ctx, result.TaskID)
+ }
+ if cancelErr = ctx.Err(); cancelErr == nil {
+ e.processTriggers(ctx, result.TaskID)
+ }
+ }
+ e.reportProgress(ctx)
+ }
+
+ e.log.Info("Finished")
+ return e.result(cancelErr)
+}
+
+func (e *execution) result(cancelErr error) error {
+ if cancelErr != nil {
+ return &flowCanceled{
+ name: e.flow.name,
+ taskErrors: e.taskErrors,
+ cause: cancelErr,
+ }
+ }
+
+ if len(e.taskErrors) > 0 {
+ return &flowFailed{
+ name: e.flow.name,
+ taskErrors: e.taskErrors,
+ }
+ }
+ return nil
+}
+
+type flowCanceled struct {
+ name string
+ taskErrors []error
+ cause error
+}
+
+type flowFailed struct {
+ name string
+ taskErrors []error
+}
+
+func (f *flowCanceled) Error() string {
+ if len(f.taskErrors) == 0 {
+ return fmt.Sprintf("flow %q was canceled: %v", f.name, f.cause)
+ }
+ return fmt.Sprintf("flow %q was canceled: %v. Encountered task errors: %v",
+ f.name, f.cause, f.taskErrors)
+}
+
+func (f *flowCanceled) Cause() error {
+ return f.cause
+}
+
+func (f *flowFailed) Error() string {
+ return fmt.Sprintf("flow %q encountered task errors: %v", f.name, f.taskErrors)
+}
+
+func (f *flowFailed) Cause() error {
+ return &multierror.Error{Errors: f.taskErrors}
+}
+
+// Errors reports all wrapped Task errors of the given Flow error.
+func Errors(err error) *multierror.Error {
+ switch e := err.(type) {
+ case *flowCanceled:
+ return &multierror.Error{Errors: e.taskErrors}
+ case *flowFailed:
+ return &multierror.Error{Errors: e.taskErrors}
+ }
+ return nil
+}
+
+// Causes reports the causes of all Task errors of the given Flow error.
+func Causes(err error) *multierror.Error {
+ var (
+ errs = Errors(err).Errors
+ causes = make([]error, 0, len(errs))
+ )
+ for _, err := range errs {
+ causes = append(causes, errors.Cause(err))
+ }
+ return &multierror.Error{Errors: causes}
+}
+
+// WasCanceled determines whether the given flow error was caused by cancellation.
+func WasCanceled(err error) bool {
+ _, ok := err.(*flowCanceled)
+ return ok
+}
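
A sketch of unwrapping the aggregated error returned by Run (illustrative only; it uses the Graph builder from graph.go, added just below, and the task name is made up).

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/gardener/gardener/pkg/utils/flow"
)

func main() {
	g := flow.NewGraph("failing-example")
	g.Add(flow.Task{
		Name: "broken",
		Fn:   func(ctx context.Context) error { return errors.New("boom") },
	})

	err := g.Compile().Run(flow.Opts{Context: context.Background()})

	// The flow error aggregates all task errors; Errors and Causes unwrap them again.
	fmt.Println(err)                          // flow "failing-example" encountered task errors: [...]
	fmt.Println(len(flow.Errors(err).Errors)) // 1
	fmt.Println(flow.WasCanceled(err))        // false
}
```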
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/flow/graph.go b/vendor/github.com/gardener/gardener/pkg/utils/flow/graph.go
new file mode 100644
index 0000000..8dc2920
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/flow/graph.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flow
+
+import (
+ "fmt"
+)
+
+// Task is a unit of work. It has a name, a payload function and a set of dependencies.
+// A Task is only started once all its dependencies have completed successfully.
+type Task struct {
+ Name string
+ Fn TaskFn
+ Dependencies TaskIDs
+}
+
+// Spec returns the TaskSpec of a task.
+func (t *Task) Spec() *TaskSpec {
+ return &TaskSpec{
+ t.Fn,
+ t.Dependencies.Copy(),
+ }
+}
+
+// TaskSpec is the functional body of a Task, consisting only of the payload function and
+// the dependencies of the Task.
+type TaskSpec struct {
+ Fn TaskFn
+ Dependencies TaskIDs
+}
+
+// Tasks is a mapping from TaskID to TaskSpec.
+type Tasks map[TaskID]*TaskSpec
+
+// Graph is a builder for a Flow.
+type Graph struct {
+ name string
+ tasks Tasks
+}
+
+// Name returns the name of a graph.
+func (g *Graph) Name() string {
+ return g.name
+}
+
+// NewGraph returns a new Graph with the given name.
+func NewGraph(name string) *Graph {
+ return &Graph{name: name, tasks: make(Tasks)}
+}
+
+// Add adds the given Task to the graph.
+// This panics if
+// - There is already a Task present with the same name
+// - One of the dependencies of the Task is not present
+func (g *Graph) Add(task Task) TaskID {
+ id := TaskID(task.Name)
+ if _, ok := g.tasks[id]; ok {
+ panic(fmt.Sprintf("Task with id %q already exists", id))
+ }
+ spec := task.Spec()
+ for dependencyID := range spec.Dependencies {
+ if _, ok := g.tasks[dependencyID]; !ok {
+ panic(fmt.Sprintf("Task %q is missing dependency %q", id, dependencyID))
+ }
+ }
+ g.tasks[id] = task.Spec()
+ return id
+}
+
+// Compile compiles the graph into an executable Flow.
+func (g *Graph) Compile() *Flow {
+ nodes := make(nodes, len(g.tasks))
+
+ for taskName, taskSpec := range g.tasks {
+ for dependencyID := range taskSpec.Dependencies {
+ dependency := nodes.getOrCreate(dependencyID)
+ dependency.addTargets(taskName)
+ }
+
+ node := nodes.getOrCreate(taskName)
+ node.fn = taskSpec.Fn
+ node.required = taskSpec.Dependencies.Len()
+ }
+
+ return &Flow{
+ g.name,
+ nodes,
+ }
+}
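
A small sketch of building and running a graph (illustrative; the task names are made up). Dependencies only reference tasks that were added earlier, matching the panic behaviour documented for Add.

```go
package main

import (
	"context"
	"fmt"

	"github.com/gardener/gardener/pkg/utils/flow"
)

func main() {
	g := flow.NewGraph("example")

	prepare := g.Add(flow.Task{
		Name: "prepare",
		Fn:   func(ctx context.Context) error { fmt.Println("prepare"); return nil },
	})
	deploy := g.Add(flow.Task{
		Name:         "deploy",
		Fn:           func(ctx context.Context) error { fmt.Println("deploy"); return nil },
		Dependencies: flow.NewTaskIDs(prepare),
	})
	g.Add(flow.Task{
		Name:         "verify",
		Fn:           func(ctx context.Context) error { fmt.Println("verify"); return nil },
		Dependencies: flow.NewTaskIDs(prepare, deploy),
	})

	// Compile turns the builder into an executable Flow; Run starts all root tasks
	// and triggers each remaining task once all of its dependencies have succeeded.
	if err := g.Compile().Run(flow.Opts{Context: context.Background()}); err != nil {
		fmt.Println("flow failed:", err)
	}
}
```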
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/flow/progress_reporter.go b/vendor/github.com/gardener/gardener/pkg/utils/flow/progress_reporter.go
new file mode 100644
index 0000000..4acdc8d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/flow/progress_reporter.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flow
+
+import (
+ "context"
+)
+
+// ProgressReporterFn is continuously called on progress in a flow.
+type ProgressReporterFn func(context.Context, *Stats)
+
+// ProgressReporter is used to report the current progress of a flow.
+type ProgressReporter interface {
+ // Start starts the progress reporter.
+ Start(context.Context) error
+ // Stop stops the progress reporter.
+ Stop()
+ // Report reports the progress using the current statistics.
+ Report(context.Context, *Stats)
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/flow/progress_reporter_delaying.go b/vendor/github.com/gardener/gardener/pkg/utils/flow/progress_reporter_delaying.go
new file mode 100644
index 0000000..c49c411
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/flow/progress_reporter_delaying.go
@@ -0,0 +1,117 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flow
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+)
+
+type progressReporterDelaying struct {
+ lock sync.Mutex
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ reporterFn ProgressReporterFn
+ period time.Duration
+ timer *time.Timer
+ pendingProgress *Stats
+ delayProgressReport bool
+}
+
+// NewDelayingProgressReporter returns a new progress reporter with the given function and the configured period. A
+// period of `0` will lead to immediate reports as soon as flow tasks are completed.
+func NewDelayingProgressReporter(reporterFn ProgressReporterFn, period time.Duration) ProgressReporter {
+ return &progressReporterDelaying{
+ reporterFn: reporterFn,
+ period: period,
+ }
+}
+
+func (p *progressReporterDelaying) Start(ctx context.Context) error {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if p.timer != nil {
+ return fmt.Errorf("progress reporter has already been started")
+ }
+
+ // We store the context on the progressReporterDelaying object so that we can call the reporterFn with the original
+ // context - otherwise, the final state cannot be reported because the cancel context will already be canceled
+ p.ctx = ctx
+
+ if p.period > 0 {
+ p.timer = time.NewTimer(p.period)
+
+ ctx, cancel := context.WithCancel(ctx)
+ p.ctxCancel = cancel
+
+ go p.run(ctx)
+ }
+
+ return nil
+}
+
+func (p *progressReporterDelaying) Stop() {
+ p.lock.Lock()
+
+ if p.ctxCancel != nil {
+ p.ctxCancel()
+ }
+
+ p.ctxCancel = nil
+ p.timer = nil
+ p.lock.Unlock()
+ p.report()
+}
+
+func (p *progressReporterDelaying) Report(_ context.Context, pendingProgress *Stats) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if p.timer != nil && p.delayProgressReport {
+ p.pendingProgress = pendingProgress
+ return
+ }
+
+ p.reporterFn(p.ctx, pendingProgress)
+ p.delayProgressReport = true
+}
+
+func (p *progressReporterDelaying) run(ctx context.Context) {
+ timer := p.timer
+ for timer != nil {
+ select {
+ case <-timer.C:
+ timer.Reset(p.period)
+ p.report()
+
+ case <-ctx.Done():
+ timer.Stop()
+ return
+ }
+ }
+}
+
+func (p *progressReporterDelaying) report() {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if p.pendingProgress != nil {
+ p.reporterFn(p.ctx, p.pendingProgress)
+ p.pendingProgress = nil
+ }
+}
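
A sketch of wiring the delaying reporter into a flow (illustrative; the period and task are arbitrary). Updates within a period are coalesced, and Stop flushes the last pending statistics.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/gardener/gardener/pkg/utils/flow"
)

func main() {
	reporter := flow.NewDelayingProgressReporter(func(_ context.Context, stats *flow.Stats) {
		fmt.Printf("%s: %d%% (%d/%d tasks succeeded)\n",
			stats.FlowName, stats.ProgressPercent(), stats.Succeeded.Len(), stats.All.Len())
	}, 5*time.Second)

	g := flow.NewGraph("demo")
	g.Add(flow.Task{Name: "step", Fn: func(ctx context.Context) error { return nil }})

	_ = g.Compile().Run(flow.Opts{
		Context:          context.Background(),
		ProgressReporter: reporter,
	})
}
```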
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/flow/progress_reporter_immediate.go b/vendor/github.com/gardener/gardener/pkg/utils/flow/progress_reporter_immediate.go
new file mode 100644
index 0000000..d3f293d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/flow/progress_reporter_immediate.go
@@ -0,0 +1,36 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flow
+
+import (
+ "context"
+)
+
+type progressReporterImmediate struct {
+ reporterFn ProgressReporterFn
+}
+
+// NewImmediateProgressReporter returns a new progress reporter with the given function.
+func NewImmediateProgressReporter(reporterFn ProgressReporterFn) ProgressReporter {
+ return progressReporterImmediate{
+ reporterFn: reporterFn,
+ }
+}
+
+func (p progressReporterImmediate) Start(context.Context) error { return nil }
+func (p progressReporterImmediate) Stop() {}
+func (p progressReporterImmediate) Report(ctx context.Context, stats *Stats) {
+ p.reporterFn(ctx, stats)
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/flow/taskfn.go b/vendor/github.com/gardener/gardener/pkg/utils/flow/taskfn.go
new file mode 100644
index 0000000..d3742ef
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/flow/taskfn.go
@@ -0,0 +1,189 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flow
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/gardener/gardener/pkg/utils/retry"
+
+ "github.com/hashicorp/go-multierror"
+)
+
+var (
+ // ContextWithTimeout is context.WithTimeout. Exposed for testing.
+ ContextWithTimeout = context.WithTimeout
+)
+
+// TaskFn is a payload function of a task.
+type TaskFn func(ctx context.Context) error
+
+// RecoverFn is a function that can recover an error.
+type RecoverFn func(ctx context.Context, err error) error
+
+// EmptyTaskFn is a TaskFn that does nothing (returns nil).
+var EmptyTaskFn TaskFn = func(ctx context.Context) error { return nil }
+
+// SimpleTaskFn converts the given function to a TaskFn, ignoring any context.Context it is given.
+// Deprecated: Only used during the transition period. Do not use for new functions.
+func SimpleTaskFn(f func() error) TaskFn {
+ return func(ctx context.Context) error {
+ return f()
+ }
+}
+
+// SkipIf returns a TaskFn that does nothing if the condition is true, otherwise the function
+// will be executed once called.
+func (t TaskFn) SkipIf(condition bool) TaskFn {
+ if condition {
+ return EmptyTaskFn
+ }
+ return t
+}
+
+// DoIf returns a TaskFn that will be executed if the condition is true when it is called.
+// Otherwise, it will do nothing when called.
+func (t TaskFn) DoIf(condition bool) TaskFn {
+ return t.SkipIf(!condition)
+}
+
+// Timeout returns a TaskFn that is bound to a context which times out.
+func (t TaskFn) Timeout(timeout time.Duration) TaskFn {
+ return func(ctx context.Context) error {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, timeout)
+ defer cancel()
+
+ return t(ctx)
+ }
+}
+
+// RetryUntilTimeout returns a TaskFn that is retried until the timeout is reached.
+func (t TaskFn) RetryUntilTimeout(interval, timeout time.Duration) TaskFn {
+ return func(ctx context.Context) error {
+ ctx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+
+ return retry.Until(ctx, interval, func(ctx context.Context) (done bool, err error) {
+ if err := t(ctx); err != nil {
+ return retry.MinorError(err)
+ }
+ return retry.Ok()
+ })
+ }
+}
+
+// ToRecoverFn converts the TaskFn to a RecoverFn that ignores the incoming error.
+func (t TaskFn) ToRecoverFn() RecoverFn {
+ return func(ctx context.Context, _ error) error {
+ return t(ctx)
+ }
+}
+
+// Recover creates a new TaskFn that recovers an error with the given RecoverFn.
+func (t TaskFn) Recover(recoverFn RecoverFn) TaskFn {
+ return func(ctx context.Context) error {
+ if err := t(ctx); err != nil {
+ if ctx.Err() != nil {
+ return err
+ }
+ return recoverFn(ctx, err)
+ }
+ return nil
+ }
+}
+
+// Sequential runs the given TaskFns sequentially.
+func Sequential(fns ...TaskFn) TaskFn {
+ return func(ctx context.Context) error {
+ for _, fn := range fns {
+ if err := fn(ctx); err != nil {
+ return err
+ }
+
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+// Parallel runs the given TaskFns in parallel, collecting their errors in a multierror.
+func Parallel(fns ...TaskFn) TaskFn {
+ return func(ctx context.Context) error {
+ var (
+ wg sync.WaitGroup
+ errors = make(chan error)
+ result error
+ )
+
+ for _, fn := range fns {
+ t := fn
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ errors <- t(ctx)
+ }()
+ }
+
+ go func() {
+ defer close(errors)
+ wg.Wait()
+ }()
+
+ for err := range errors {
+ if err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ return result
+ }
+}
+
+// ParallelExitOnError runs the given TaskFns in parallel and stops execution as soon as one TaskFn returns an error.
+func ParallelExitOnError(fns ...TaskFn) TaskFn {
+ return func(ctx context.Context) error {
+ var (
+ wg sync.WaitGroup
+ errors = make(chan error)
+ subCtx, cancel = context.WithCancel(ctx)
+ )
+ defer cancel()
+
+ for _, fn := range fns {
+ t := fn
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ errors <- t(subCtx)
+ }()
+ }
+
+ go func() {
+ defer close(errors)
+ wg.Wait()
+ }()
+
+ for err := range errors {
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
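
A sketch combining the decorators above (illustrative; the intervals, timeouts and step bodies are arbitrary).

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/gardener/gardener/pkg/utils/flow"
)

func main() {
	attempts := 0

	// Retry the payload every 10ms until it succeeds or the 1s timeout expires.
	deploy := flow.TaskFn(func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready yet")
		}
		return nil
	}).RetryUntilTimeout(10*time.Millisecond, time.Second)

	// Run independent steps concurrently; any errors would be collected in a multierror.
	err := flow.Parallel(
		deploy,
		flow.TaskFn(func(ctx context.Context) error { return nil }).Timeout(100*time.Millisecond),
		flow.EmptyTaskFn.DoIf(false), // condition is false, so this step is skipped
	)(context.Background())

	fmt.Println(err, attempts) // <nil> 3
}
```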
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/flow/taskid.go b/vendor/github.com/gardener/gardener/pkg/utils/flow/taskid.go
new file mode 100644
index 0000000..e786fa8
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/flow/taskid.go
@@ -0,0 +1,155 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flow
+
+import "sort"
+
+// TaskID is an id of a task.
+type TaskID string
+
+// TaskIDs retrieves this TaskID as a singleton slice.
+func (t TaskID) TaskIDs() []TaskID {
+ return []TaskID{t}
+}
+
+// TaskIDs is a set of TaskID.
+type TaskIDs map[TaskID]struct{}
+
+// TaskIDs retrieves all TaskIDs as an unsorted slice.
+func (t TaskIDs) TaskIDs() []TaskID {
+ return t.UnsortedList()
+}
+
+// TaskIDer can produce a slice of TaskIDs.
+// Default implementations of this are
+// TaskIDs, TaskID and TaskIDSlice
+type TaskIDer interface {
+ // TaskIDs reports all TaskIDs of this TaskIDer.
+ TaskIDs() []TaskID
+}
+
+// NewTaskIDs returns a new set of TaskIDs initialized
+// to contain all TaskIDs of the given TaskIDers.
+func NewTaskIDs(ids ...TaskIDer) TaskIDs {
+ set := make(TaskIDs)
+ set.Insert(ids...)
+ return set
+}
+
+// Insert inserts the TaskIDs of all TaskIDers into
+// this TaskIDs.
+func (t TaskIDs) Insert(iders ...TaskIDer) TaskIDs {
+ for _, ider := range iders {
+ for _, id := range ider.TaskIDs() {
+ t[id] = struct{}{}
+ }
+ }
+ return t
+}
+
+// InsertIf inserts the TaskIDs of all TaskIDers into
+// this TaskIDs if the given condition evaluates to true.
+func (t TaskIDs) InsertIf(condition bool, iders ...TaskIDer) TaskIDs {
+ if condition {
+ return t.Insert(iders...)
+ }
+ return t
+}
+
+// Delete deletes the TaskIDs of all TaskIDers from
+// this TaskIDs.
+func (t TaskIDs) Delete(iders ...TaskIDer) TaskIDs {
+ for _, ider := range iders {
+ for _, id := range ider.TaskIDs() {
+ delete(t, id)
+ }
+ }
+ return t
+}
+
+// Len returns the number of TaskIDs this set contains.
+func (t TaskIDs) Len() int {
+ return len(t)
+}
+
+// Has checks if the given TaskID is present in this set.
+func (t TaskIDs) Has(id TaskID) bool {
+ _, ok := t[id]
+ return ok
+}
+
+// Copy makes a deep copy of this TaskIDs.
+func (t TaskIDs) Copy() TaskIDs {
+ out := make(TaskIDs, len(t))
+ for k := range t {
+ out[k] = struct{}{}
+ }
+ return out
+}
+
+// UnsortedList returns the elements of this in an unordered slice.
+func (t TaskIDs) UnsortedList() TaskIDSlice {
+ out := make([]TaskID, 0, len(t))
+ for k := range t {
+ out = append(out, k)
+ }
+ return out
+}
+
+// List returns the elements of this in an ordered slice.
+func (t TaskIDs) List() TaskIDSlice {
+ out := make(TaskIDSlice, 0, len(t))
+ for k := range t {
+ out = append(out, k)
+ }
+ sort.Sort(out)
+ return out
+}
+
+// UnsortedStringList returns the elements of this in an unordered string slice.
+func (t TaskIDs) UnsortedStringList() []string {
+ out := make([]string, 0, len(t))
+ for k := range t {
+ out = append(out, string(k))
+ }
+ return out
+}
+
+// StringList returns the elements of this in an ordered string slice.
+func (t TaskIDs) StringList() []string {
+ out := t.UnsortedStringList()
+ sort.Strings(out)
+ return out
+}
+
+// TaskIDSlice is a slice of TaskIDs.
+type TaskIDSlice []TaskID
+
+// TaskIDs returns this as a slice of TaskIDs.
+func (t TaskIDSlice) TaskIDs() []TaskID {
+ return t
+}
+
+func (t TaskIDSlice) Len() int {
+ return len(t)
+}
+
+func (t TaskIDSlice) Less(i1, i2 int) bool {
+ return t[i1] < t[i2]
+}
+
+func (t TaskIDSlice) Swap(i1, i2 int) {
+ t[i1], t[i2] = t[i2], t[i1]
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/imagevector/imagevector.go b/vendor/github.com/gardener/gardener/pkg/utils/imagevector/imagevector.go
new file mode 100644
index 0000000..6dcfa33
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/imagevector/imagevector.go
@@ -0,0 +1,328 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package imagevector
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "regexp"
+ "strings"
+
+ versionutils "github.com/gardener/gardener/pkg/utils/version"
+
+ "gopkg.in/yaml.v2"
+)
+
+const (
+ // OverrideEnv is the name of the image vector override environment variable.
+ OverrideEnv = "IMAGEVECTOR_OVERWRITE"
+ // SHA256TagPrefix is the prefix in an image tag for sha256 tags.
+ SHA256TagPrefix = "sha256:"
+)
+
+// Read reads an ImageVector from the given io.Reader.
+func Read(r io.Reader) (ImageVector, error) {
+ vector := struct {
+ Images ImageVector `json:"images" yaml:"images"`
+ }{}
+
+ if err := yaml.NewDecoder(r).Decode(&vector); err != nil {
+ return nil, err
+ }
+ return vector.Images, nil
+}
+
+// ReadFile reads an ImageVector from the file with the given name.
+func ReadFile(name string) (ImageVector, error) {
+ file, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return Read(file)
+}
+
+// ReadGlobalImageVectorWithEnvOverride reads the global image vector and applies the env override. Exposed for testing.
+func ReadGlobalImageVectorWithEnvOverride(filePath string) (ImageVector, error) {
+ imageVector, err := ReadFile(filePath)
+ if err != nil {
+ return nil, err
+ }
+
+ return WithEnvOverride(imageVector)
+}
+
+// mergeImageSources merges the two given ImageSources.
+//
+// Fields set on the override take precedence; if its tag, runtimeVersion or targetVersion is
+// unset, the corresponding value from the old source is kept.
+func mergeImageSources(old, override *ImageSource) *ImageSource {
+ tag := override.Tag
+ if tag == nil {
+ tag = old.Tag
+ }
+
+ runtimeVersion := override.RuntimeVersion
+ if runtimeVersion == nil {
+ runtimeVersion = old.RuntimeVersion
+ }
+
+ targetVersion := override.TargetVersion
+ if targetVersion == nil {
+ targetVersion = old.TargetVersion
+ }
+
+ return &ImageSource{
+ Name: override.Name,
+ RuntimeVersion: runtimeVersion,
+ TargetVersion: targetVersion,
+ Repository: override.Repository,
+ Tag: tag,
+ }
+}
+
+type imageSourceKey struct {
+ Name string
+ RuntimeVersion string
+ TargetVersion string
+}
+
+func computeKey(source *ImageSource) imageSourceKey {
+ var runtimeVersion, targetVersion string
+
+ if source.RuntimeVersion != nil {
+ runtimeVersion = *source.RuntimeVersion
+ }
+ if source.TargetVersion != nil {
+ targetVersion = *source.TargetVersion
+ }
+
+ return imageSourceKey{
+ Name: source.Name,
+ RuntimeVersion: runtimeVersion,
+ TargetVersion: targetVersion,
+ }
+}
+
+// Merge merges the given ImageVectors into one.
+//
+// Images of ImageVectors that are later in the given sequence with the same name override
+// previous images.
+func Merge(vectors ...ImageVector) ImageVector {
+ var (
+ out ImageVector
+ keyToIndex = make(map[imageSourceKey]int)
+ )
+
+ for _, vector := range vectors {
+ for _, image := range vector {
+ key := computeKey(image)
+
+ if idx, ok := keyToIndex[key]; ok {
+ out[idx] = mergeImageSources(out[idx], image)
+ continue
+ }
+
+ keyToIndex[key] = len(out)
+ out = append(out, image)
+ }
+ }
+
+ return out
+}
+
+// WithEnvOverride checks if an environment variable with the key IMAGEVECTOR_OVERWRITE is set.
+// If yes, it reads the ImageVector at the value of the variable and merges it with the given one.
+// Otherwise, it returns the unmodified ImageVector.
+func WithEnvOverride(vector ImageVector) (ImageVector, error) {
+ overwritePath := os.Getenv(OverrideEnv)
+ if len(overwritePath) == 0 {
+ return vector, nil
+ }
+
+ override, err := ReadFile(overwritePath)
+ if err != nil {
+ return nil, err
+ }
+
+ return Merge(vector, override), nil
+}
+
+// String implements Stringer.
+func (o *FindOptions) String() string {
+ var runtimeVersion string
+ if o.RuntimeVersion != nil {
+ runtimeVersion = "runtime version " + *o.RuntimeVersion + " "
+ }
+
+ var targetVersion string
+ if o.TargetVersion != nil {
+ targetVersion = "target version " + *o.TargetVersion
+ }
+
+ return runtimeVersion + targetVersion
+}
+
+// ApplyOptions applies the given FindOptionFuncs to these FindOptions. Returns a pointer to the mutated value.
+func (o *FindOptions) ApplyOptions(opts []FindOptionFunc) *FindOptions {
+ for _, opt := range opts {
+ opt(o)
+ }
+ return o
+}
+
+// RuntimeVersion sets the RuntimeVersion of the FindOptions to the given version.
+func RuntimeVersion(version string) FindOptionFunc {
+ return func(options *FindOptions) {
+ options.RuntimeVersion = &version
+ }
+}
+
+// TargetVersion sets the TargetVersion of the FindOptions to the given version.
+func TargetVersion(version string) FindOptionFunc {
+ return func(options *FindOptions) {
+ options.TargetVersion = &version
+ }
+}
+
+var r = regexp.MustCompile(`^(v?[0-9]+|=)`)
+
+func checkConstraint(constraint, version *string) (score int, ok bool, err error) {
+ if constraint == nil || version == nil {
+ return 0, true, nil
+ }
+
+ matches, err := versionutils.CheckVersionMeetsConstraint(*version, *constraint)
+ if err != nil || !matches {
+ return 0, false, err
+ }
+
+ score = 1
+
+ // prioritize equal constraints
+ if r.MatchString(*constraint) {
+ score = 2
+ }
+
+ return score, true, nil
+}
+
+func match(source *ImageSource, name string, opts *FindOptions) (score int, ok bool, err error) {
+ if source.Name != name {
+ return 0, false, nil
+ }
+
+ runtimeScore, ok, err := checkConstraint(source.RuntimeVersion, opts.RuntimeVersion)
+ if err != nil || !ok {
+ return 0, false, err
+ }
+ score += runtimeScore
+
+ targetScore, ok, err := checkConstraint(source.TargetVersion, opts.TargetVersion)
+ if err != nil || !ok {
+ return 0, false, err
+ }
+ score += targetScore
+
+ return score, true, nil
+}
+
+// FindImage returns an image with the given name from the sources in the image vector.
+// The RuntimeVersion option specifies the Kubernetes version the image will be running on.
+// The TargetVersion option specifies the Kubernetes version the image shall target.
+// If multiple entries are found, the provided versions are compared with the constraints
+// stated in the image definitions and the entry with the most specific match is preferred.
+// In case multiple images match equally well, the first one found is returned.
+// In case no image was found, an error is returned.
+func (v ImageVector) FindImage(name string, opts ...FindOptionFunc) (*Image, error) {
+ o := &FindOptions{}
+ o = o.ApplyOptions(opts)
+
+ var (
+ bestScore int
+ bestCandidate *ImageSource
+ )
+
+ for _, source := range v {
+ if source.Name == name {
+ score, ok, err := match(source, name, o)
+ if err != nil {
+ return nil, err
+ }
+
+ if ok && (bestCandidate == nil || score > bestScore) {
+ bestCandidate = source
+ bestScore = score
+ }
+ }
+ }
+
+ if bestCandidate == nil {
+ return nil, fmt.Errorf("could not find image %q opts %v", name, o)
+ }
+
+ return bestCandidate.ToImage(o.TargetVersion), nil
+}
+
+// FindImages returns a map of images for the given names from the sources in the image vector.
+// Each name is resolved via FindImage with the supplied options:
+// the RuntimeVersion option specifies the Kubernetes version the images will be running on,
+// and the TargetVersion option specifies the Kubernetes version the images shall target.
+// The constraints stated in the image definitions are compared against these versions
+// to select the best-matching entry for each name.
+// In case any image cannot be found, an error is returned.
+func FindImages(v ImageVector, names []string, opts ...FindOptionFunc) (map[string]*Image, error) {
+ images := map[string]*Image{}
+ for _, imageName := range names {
+ image, err := v.FindImage(imageName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ images[imageName] = image
+ }
+ return images, nil
+}
+
+// ToImage applies the given targetVersion to the source to produce an output image.
+// If the tag of the image source is empty, the targetVersion (prefixed with "v") is used as the tag.
+func (i *ImageSource) ToImage(targetVersion *string) *Image {
+ tag := i.Tag
+ if tag == nil && targetVersion != nil {
+ version := fmt.Sprintf("v%s", strings.TrimLeft(*targetVersion, "v"))
+ tag = &version
+ }
+
+ return &Image{
+ Name: i.Name,
+ Repository: i.Repository,
+ Tag: tag,
+ }
+}
+
+// String returns the string representation of the image.
+func (i *Image) String() string {
+ if i.Tag == nil {
+ return i.Repository
+ }
+
+ delimiter := ":"
+ if strings.HasPrefix(*i.Tag, SHA256TagPrefix) {
+ delimiter = "@"
+ }
+
+ return i.Repository + delimiter + *i.Tag
+}
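
A sketch of reading a vector and resolving images (illustrative; the image names, repositories and versions are placeholders, not real Gardener images).

```go
package main

import (
	"fmt"
	"strings"

	"github.com/gardener/gardener/pkg/utils/imagevector"
)

const vectorYAML = `
images:
- name: pause-container
  repository: registry.example.com/pause
  tag: "3.1"
- name: csi-driver
  repository: registry.example.com/csi-driver
  runtimeVersion: ">= 1.18"
`

func main() {
	vector, err := imagevector.Read(strings.NewReader(vectorYAML))
	if err != nil {
		panic(err)
	}

	images, err := imagevector.FindImages(vector, []string{"pause-container", "csi-driver"},
		imagevector.RuntimeVersion("1.19.4"), imagevector.TargetVersion("1.19.4"))
	if err != nil {
		panic(err)
	}

	// An entry with a tag keeps it; an entry without a tag falls back to the target version.
	fmt.Println(images["pause-container"].String()) // registry.example.com/pause:3.1
	fmt.Println(images["csi-driver"].String())      // registry.example.com/csi-driver:v1.19.4
}
```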
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/imagevector/imagevector_components.go b/vendor/github.com/gardener/gardener/pkg/utils/imagevector/imagevector_components.go
new file mode 100644
index 0000000..7295597
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/imagevector/imagevector_components.go
@@ -0,0 +1,57 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package imagevector
+
+import (
+ "io"
+ "os"
+
+ "gopkg.in/yaml.v2"
+)
+
+const (
+ // ComponentOverrideEnv is the name of the environment variable for image vector overrides of components deployed
+ // by Gardener.
+ ComponentOverrideEnv = "IMAGEVECTOR_OVERWRITE_COMPONENTS"
+)
+
+// ReadComponentOverwrite reads ComponentImageVectors from the given io.Reader.
+func ReadComponentOverwrite(r io.Reader) (ComponentImageVectors, error) {
+ data := struct {
+ Components []ComponentImageVector `json:"components" yaml:"components"`
+ }{}
+
+ if err := yaml.NewDecoder(r).Decode(&data); err != nil {
+ return nil, err
+ }
+
+ out := make(ComponentImageVectors, len(data.Components))
+ for _, component := range data.Components {
+ out[component.Name] = component.ImageVectorOverwrite
+ }
+
+ return out, nil
+}
+
+// ReadComponentOverwriteFile reads ComponentImageVectors from the file with the given name.
+func ReadComponentOverwriteFile(name string) (ComponentImageVectors, error) {
+ file, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return ReadComponentOverwrite(file)
+}
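
A sketch of the expected overwrite document (illustrative; the component name and inner vector are placeholders). The resulting map value is the raw image vector YAML that the named component merges over its built-in vector.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/gardener/gardener/pkg/utils/imagevector"
)

const componentsYAML = `
components:
- name: gardenlet
  imageVectorOverwrite: |
    images:
    - name: pause-container
      repository: registry.example.com/pause
      tag: "3.2"
`

func main() {
	overwrites, err := imagevector.ReadComponentOverwrite(strings.NewReader(componentsYAML))
	if err != nil {
		panic(err)
	}

	// The value is a plain string; parsing it into an ImageVector is up to the consumer.
	fmt.Print(overwrites["gardenlet"])
}
```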
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/imagevector/types.go b/vendor/github.com/gardener/gardener/pkg/utils/imagevector/types.go
new file mode 100644
index 0000000..168fb8e
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/imagevector/types.go
@@ -0,0 +1,59 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package imagevector
+
+// ImageSource contains the repository and the tag of a Docker container image. If the respective
+// image is only valid for a specific Kubernetes runtime version, then it must also contain the
+// 'runtimeVersion' field describing for which versions it can be used. Similarly, if it is only
+// valid for a specific Kubernetes version to operate on, then it must also contain the 'targetVersion'
+// field describing for which versions it can be used. Examples of these are CSI controllers that run
+// in the seed cluster and act on the shoot cluster. Different versions might be used depending on the
+// seed and the shoot version.
+type ImageSource struct {
+ Name string `json:"name" yaml:"name"`
+ RuntimeVersion *string `json:"runtimeVersion,omitempty" yaml:"runtimeVersion,omitempty"`
+ TargetVersion *string `json:"targetVersion,omitempty" yaml:"targetVersion,omitempty"`
+
+ Repository string `json:"repository" yaml:"repository"`
+ Tag *string `json:"tag,omitempty" yaml:"tag,omitempty"`
+}
+
+// Image is a concrete, pullable image with a nonempty tag.
+type Image struct {
+ Name string
+ Repository string
+ Tag *string
+}
+
+// ImageVector is a list of image sources.
+type ImageVector []*ImageSource
+
+// ComponentImageVector contains an image vector overwrite for a component deployed by Gardener.
+type ComponentImageVector struct {
+ Name string `json:"name" yaml:"name"`
+ ImageVectorOverwrite string `json:"imageVectorOverwrite" yaml:"imageVectorOverwrite"`
+}
+
+// ComponentImageVectors maps a component with a given name (key) to the image vector overwrite content (value).
+type ComponentImageVectors map[string]string
+
+// FindOptions are options that can be supplied during either `FindImage` or `FindImages`.
+type FindOptions struct {
+ RuntimeVersion *string
+ TargetVersion *string
+}
+
+// FindOptionFunc is a function that mutates FindOptions.
+type FindOptionFunc func(*FindOptions)
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/infodata/infodata.go b/vendor/github.com/gardener/gardener/pkg/utils/infodata/infodata.go
new file mode 100644
index 0000000..3e99a9c
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/infodata/infodata.go
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+package infodata
+
+import (
+ "fmt"
+ "sync"
+
+ gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
+ gardencorev1alpha1helper "github.com/gardener/gardener/pkg/apis/core/v1alpha1/helper"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+var lock sync.Mutex
+var types = map[TypeVersion]Unmarshaller{}
+
+// Register is used to register new InfoData type versions
+func Register(typeversion TypeVersion, unmarshaller Unmarshaller) {
+ lock.Lock()
+ defer lock.Unlock()
+ types[typeversion] = unmarshaller
+}
+
+// GetUnmarshaller returns an Unmarshaller for the given typeName.
+func GetUnmarshaller(typeName TypeVersion) Unmarshaller {
+ lock.Lock()
+ defer lock.Unlock()
+ return types[typeName]
+}
+
+// Unmarshal unmarshals a GardenerResourceData into its respective Go struct representation
+func Unmarshal(entry *gardencorev1alpha1.GardenerResourceData) (InfoData, error) {
+ unmarshaller := GetUnmarshaller(TypeVersion(entry.Type))
+ if unmarshaller == nil {
+ return nil, fmt.Errorf("unknown info data type %q", entry.Type)
+ }
+ data, err := unmarshaller(entry.Data.Raw)
+ if err != nil {
+ return nil, fmt.Errorf("cannot unmarshal data set %q of type %q: %s", entry.Name, entry.Type, err)
+ }
+ return data, nil
+}
+
+// GetInfoData retrieves the go representation of an object from the GardenerResourceDataList
+func GetInfoData(resourceDataList gardencorev1alpha1helper.GardenerResourceDataList, name string) (InfoData, error) {
+ resourceData := resourceDataList.Get(name)
+ if resourceData == nil {
+ return nil, nil
+ }
+
+ return Unmarshal(resourceData)
+}
+
+// UpsertInfoData updates or inserts an InfoData object into the GardenerResourceDataList
+func UpsertInfoData(resourceDataList *gardencorev1alpha1helper.GardenerResourceDataList, name string, data InfoData) error {
+ if _, ok := data.(*emptyInfoData); ok {
+ return nil
+ }
+
+ bytes, err := data.Marshal()
+ if err != nil {
+ return err
+ }
+
+ gardenerResourceData := &gardencorev1alpha1.GardenerResourceData{
+ Name: name,
+ Type: string(data.TypeVersion()),
+ Data: runtime.RawExtension{Raw: bytes},
+ }
+
+ resourceDataList.Upsert(gardenerResourceData)
+ return nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/infodata/types.go b/vendor/github.com/gardener/gardener/pkg/utils/infodata/types.go
new file mode 100644
index 0000000..af4fd11
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/infodata/types.go
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+package infodata
+
+// TypeVersion is the potentially versioned type name of an InfoData representation.
+type TypeVersion string
+
+// Unmarshaller is a factory to create a dedicated InfoData object from a byte stream
+type Unmarshaller func(data []byte) (InfoData, error)
+
+// InfoData is an interface for typed info data that can report its type version and marshal itself into bytes.
+type InfoData interface {
+ TypeVersion() TypeVersion
+ Marshal() ([]byte, error)
+}
+
+// Loader is an interface which declares methods that can be used to extract InfoData from Kubernetes resources data.
+// TODO: This interface can be removed in a later version after all resources have been synced to the ShootState.
+type Loader interface {
+ LoadFromSecretData(map[string][]byte) (InfoData, error)
+}
+
+type emptyInfoData struct{}
+
+func (*emptyInfoData) Marshal() ([]byte, error) {
+ return nil, nil
+}
+
+func (*emptyInfoData) TypeVersion() TypeVersion {
+ return ""
+}
+
+// EmptyInfoData is an infodata which does not contain any information.
+var EmptyInfoData = &emptyInfoData{}
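
For orientation (not part of the patch): a minimal sketch of how the infodata registry and types above fit together, i.e. an InfoData implementation, a registered Unmarshaller, and a round trip through Unmarshal. The exampleData type and the "example/v1" type version are hypothetical.

package main

import (
	"encoding/json"
	"fmt"

	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
	"github.com/gardener/gardener/pkg/utils/infodata"
	"k8s.io/apimachinery/pkg/runtime"
)

// exampleData is a hypothetical InfoData implementation.
type exampleData struct {
	Value string `json:"value"`
}

func (e *exampleData) TypeVersion() infodata.TypeVersion { return "example/v1" }
func (e *exampleData) Marshal() ([]byte, error)          { return json.Marshal(e) }

func main() {
	// Register an Unmarshaller for the hypothetical type version.
	infodata.Register("example/v1", func(data []byte) (infodata.InfoData, error) {
		out := &exampleData{}
		return out, json.Unmarshal(data, out)
	})

	entry := &gardencorev1alpha1.GardenerResourceData{
		Name: "example",
		Type: "example/v1",
		Data: runtime.RawExtension{Raw: []byte(`{"value":"hello"}`)},
	}

	decoded, err := infodata.Unmarshal(entry)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.(*exampleData).Value)
}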
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/bootstrap_token.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/bootstrap_token.go
new file mode 100644
index 0000000..6e1abb4
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/bootstrap_token.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+ "regexp"
+ "time"
+
+ "github.com/gardener/gardener/pkg/utils"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ bootstraptokenapi "k8s.io/cluster-bootstrap/token/api"
+ bootstraptokenutil "k8s.io/cluster-bootstrap/token/util"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+)
+
+// ComputeBootstrapToken computes and creates a new bootstrap token, and returns it.
+func ComputeBootstrapToken(ctx context.Context, c client.Client, tokenID, description string, validity time.Duration) (secret *corev1.Secret, err error) {
+ var (
+ bootstrapTokenSecretKey string
+ )
+
+ secret = &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: bootstraptokenutil.BootstrapTokenSecretName(tokenID),
+ Namespace: metav1.NamespaceSystem,
+ },
+ }
+
+ if err = c.Get(ctx, Key(secret.Namespace, secret.Name), secret); client.IgnoreNotFound(err) != nil {
+ return nil, err
+ }
+
+ validBootstrapTokenSecret, _ := regexp.Compile(`[a-z0-9]{16}`)
+ if existingSecretToken, ok := secret.Data[bootstraptokenapi.BootstrapTokenSecretKey]; ok && validBootstrapTokenSecret.Match(existingSecretToken) {
+ bootstrapTokenSecretKey = string(existingSecretToken)
+ } else {
+ bootstrapTokenSecretKey, err = utils.GenerateRandomStringFromCharset(16, "0123456789abcdefghijklmnopqrstuvwxyz")
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ data := map[string][]byte{
+ bootstraptokenapi.BootstrapTokenDescriptionKey: []byte(description),
+ bootstraptokenapi.BootstrapTokenIDKey: []byte(tokenID),
+ bootstraptokenapi.BootstrapTokenSecretKey: []byte(bootstrapTokenSecretKey),
+ bootstraptokenapi.BootstrapTokenExpirationKey: []byte(metav1.Now().Add(validity).Format(time.RFC3339)),
+ bootstraptokenapi.BootstrapTokenUsageAuthentication: []byte("true"),
+ bootstraptokenapi.BootstrapTokenUsageSigningKey: []byte("true"),
+ }
+
+ _, err2 := controllerutil.CreateOrUpdate(ctx, c, secret, func() error {
+ secret.Type = bootstraptokenapi.SecretTypeBootstrapToken
+ secret.Data = data
+ return nil
+ })
+
+ return secret, err2
+}
+
+// BootstrapTokenFrom returns the bootstrap token based on the secret data.
+func BootstrapTokenFrom(data map[string][]byte) string {
+ return bootstraptokenutil.TokenFromIDAndSecret(string(data[bootstraptokenapi.BootstrapTokenIDKey]), string(data[bootstraptokenapi.BootstrapTokenSecretKey]))
+}
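
For orientation (not part of the patch): a sketch of how ComputeBootstrapToken and BootstrapTokenFrom could be called, assuming a reachable cluster and the usual kubeconfig/in-cluster config discovery. The token ID and description are placeholders; bootstrap token IDs are conventionally six lowercase alphanumeric characters.

package main

import (
	"context"
	"fmt"
	"time"

	kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	cfg, err := config.GetConfig()
	if err != nil {
		panic(err)
	}
	c, err := client.New(cfg, client.Options{})
	if err != nil {
		panic(err)
	}

	// Creates (or reuses) the bootstrap token secret in kube-system and returns it.
	secret, err := kutil.ComputeBootstrapToken(context.Background(), c, "abc123", "example bootstrap token", 30*time.Minute)
	if err != nil {
		panic(err)
	}

	// The full token has the form "<id>.<secret>".
	fmt.Println(kutil.BootstrapTokenFrom(secret.Data))
}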
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/controllerinstallation.go
new file mode 100644
index 0000000..f4fc0de
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/controllerinstallation.go
@@ -0,0 +1,112 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ gardencore "github.com/gardener/gardener/pkg/client/core/clientset/versioned"
+ "github.com/gardener/gardener/pkg/client/kubernetes"
+ "github.com/gardener/gardener/pkg/logger"
+
+ "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/util/retry"
+)
+
+func tryUpdateControllerInstallation(
+ ctx context.Context,
+ g gardencore.Interface,
+ backoff wait.Backoff,
+ meta metav1.ObjectMeta,
+ transform func(*gardencorev1beta1.ControllerInstallation) (*gardencorev1beta1.ControllerInstallation, error),
+ updateFunc func(g gardencore.Interface, controllerInstallation *gardencorev1beta1.ControllerInstallation) (*gardencorev1beta1.ControllerInstallation, error),
+ equalFunc func(cur, updated *gardencorev1beta1.ControllerInstallation) bool,
+) (*gardencorev1beta1.ControllerInstallation, error) {
+
+ var (
+ result *gardencorev1beta1.ControllerInstallation
+ attempt int
+ )
+ err := retry.RetryOnConflict(backoff, func() (err error) {
+ attempt++
+ cur, err := g.CoreV1beta1().ControllerInstallations().Get(ctx, meta.Name, kubernetes.DefaultGetOptions())
+ if err != nil {
+ return err
+ }
+
+ updated, err := transform(cur.DeepCopy())
+ if err != nil {
+ return err
+ }
+
+ if equalFunc(cur, updated) {
+ result = cur
+ return nil
+ }
+
+ result, err = updateFunc(g, updated)
+ if err != nil {
+ logger.Logger.Errorf("Attempt %d failed to update ControllerInstallation %s due to %v", attempt, cur.Name, err)
+ }
+ return
+ })
+ if err != nil {
+ logger.Logger.Errorf("Failed to update ControllerInstallation %s after %d attempts due to %v", meta.Name, attempt, err)
+ }
+ return result, err
+}
+
+// TryUpdateControllerInstallationWithEqualFunc tries to update the ControllerInstallation matching the given ObjectMeta.
+// It retries with the given characteristics as long as it gets Conflict errors.
+// The transformation function is applied to the current state of the ControllerInstallation object. If the equal
+// func concludes a semantically equal ControllerInstallation, no update is done and the operation returns normally.
+func TryUpdateControllerInstallationWithEqualFunc(ctx context.Context, g gardencore.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*gardencorev1beta1.ControllerInstallation) (*gardencorev1beta1.ControllerInstallation, error), equal func(cur, updated *gardencorev1beta1.ControllerInstallation) bool) (*gardencorev1beta1.ControllerInstallation, error) {
+ return tryUpdateControllerInstallation(ctx, g, backoff, meta, transform, func(g gardencore.Interface, controllerInstallation *gardencorev1beta1.ControllerInstallation) (*gardencorev1beta1.ControllerInstallation, error) {
+ return g.CoreV1beta1().ControllerInstallations().Update(ctx, controllerInstallation, kubernetes.DefaultUpdateOptions())
+ }, equal)
+}
+
+// TryUpdateControllerInstallation tries to update the ControllerInstallation matching the given ObjectMeta.
+// It retries with the given characteristics as long as it gets Conflict errors.
+// The transformation function is applied to the current state of the ControllerInstallation object. If the transformation
+// yields a semantically equal ControllerInstallation, no update is done and the operation returns normally.
+func TryUpdateControllerInstallation(ctx context.Context, g gardencore.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*gardencorev1beta1.ControllerInstallation) (*gardencorev1beta1.ControllerInstallation, error)) (*gardencorev1beta1.ControllerInstallation, error) {
+ return TryUpdateControllerInstallationWithEqualFunc(ctx, g, backoff, meta, transform, func(cur, updated *gardencorev1beta1.ControllerInstallation) bool {
+ return equality.Semantic.DeepEqual(cur, updated)
+ })
+}
+
+// TryUpdateControllerInstallationStatusWithEqualFunc tries to update the status of the ControllerInstallation matching the given ObjectMeta.
+// It retries with the given characteristics as long as it gets Conflict errors.
+// The transformation function is applied to the current state of the ControllerInstallation object. If the equal
+// func concludes a semantically equal ControllerInstallation, no update is done and the operation returns normally.
+func TryUpdateControllerInstallationStatusWithEqualFunc(ctx context.Context, g gardencore.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*gardencorev1beta1.ControllerInstallation) (*gardencorev1beta1.ControllerInstallation, error), equal func(cur, updated *gardencorev1beta1.ControllerInstallation) bool) (*gardencorev1beta1.ControllerInstallation, error) {
+ return tryUpdateControllerInstallation(ctx, g, backoff, meta, transform, func(g gardencore.Interface, controllerInstallation *gardencorev1beta1.ControllerInstallation) (*gardencorev1beta1.ControllerInstallation, error) {
+ return g.CoreV1beta1().ControllerInstallations().UpdateStatus(ctx, controllerInstallation, kubernetes.DefaultUpdateOptions())
+ }, equal)
+}
+
+// TryUpdateControllerInstallationStatus tries to update the status of the ControllerInstallation matching the given ObjectMeta.
+// It retries with the given characteristics as long as it gets Conflict errors.
+// The transformation function is applied to the current state of the ControllerInstallation object. If the transformation
+// yields a semantically equal ControllerInstallation, no update is done and the operation returns normally.
+func TryUpdateControllerInstallationStatus(ctx context.Context, g gardencore.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*gardencorev1beta1.ControllerInstallation) (*gardencorev1beta1.ControllerInstallation, error)) (*gardencorev1beta1.ControllerInstallation, error) {
+ return TryUpdateControllerInstallationStatusWithEqualFunc(ctx, g, backoff, meta, transform, func(cur, updated *gardencorev1beta1.ControllerInstallation) bool {
+ return equality.Semantic.DeepEqual(cur.Status, updated.Status)
+ })
+}
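
For orientation (not part of the patch): a sketch of a conflict-safe update via TryUpdateControllerInstallation, assuming a garden clientset built from a kubeconfig. The kubeconfig path, object name, and annotation key are hypothetical.

package main

import (
	"context"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	gardencoreclientset "github.com/gardener/gardener/pkg/client/core/clientset/versioned"
	kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/retry"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/garden-kubeconfig")
	if err != nil {
		panic(err)
	}
	gardenClient, err := gardencoreclientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// The transform runs on a deep copy; if it yields a semantically equal object, no update is sent.
	_, err = kutil.TryUpdateControllerInstallation(context.Background(), gardenClient, retry.DefaultRetry,
		metav1.ObjectMeta{Name: "my-controllerinstallation"},
		func(ci *gardencorev1beta1.ControllerInstallation) (*gardencorev1beta1.ControllerInstallation, error) {
			if ci.Annotations == nil {
				ci.Annotations = map[string]string{}
			}
			ci.Annotations["example.gardener.cloud/touched"] = "true"
			return ci, nil
		})
	if err != nil {
		panic(err)
	}
}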
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/controllerregistration.go
new file mode 100644
index 0000000..60ce13e
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/controllerregistration.go
@@ -0,0 +1,92 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ gardencore "github.com/gardener/gardener/pkg/client/core/clientset/versioned"
+ "github.com/gardener/gardener/pkg/client/kubernetes"
+ "github.com/gardener/gardener/pkg/logger"
+
+ "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/util/retry"
+)
+
+func tryUpdateControllerRegistration(
+ ctx context.Context,
+ g gardencore.Interface,
+ backoff wait.Backoff,
+ meta metav1.ObjectMeta,
+ transform func(*gardencorev1beta1.ControllerRegistration) (*gardencorev1beta1.ControllerRegistration, error),
+ updateFunc func(g gardencore.Interface, controllerRegistration *gardencorev1beta1.ControllerRegistration) (*gardencorev1beta1.ControllerRegistration, error),
+ equalFunc func(cur, updated *gardencorev1beta1.ControllerRegistration) bool,
+) (*gardencorev1beta1.ControllerRegistration, error) {
+
+ var (
+ result *gardencorev1beta1.ControllerRegistration
+ attempt int
+ )
+ err := retry.RetryOnConflict(backoff, func() (err error) {
+ attempt++
+ cur, err := g.CoreV1beta1().ControllerRegistrations().Get(ctx, meta.Name, kubernetes.DefaultGetOptions())
+ if err != nil {
+ return err
+ }
+
+ updated, err := transform(cur.DeepCopy())
+ if err != nil {
+ return err
+ }
+
+ if equalFunc(cur, updated) {
+ result = cur
+ return nil
+ }
+
+ result, err = updateFunc(g, updated)
+ if err != nil {
+ logger.Logger.Errorf("Attempt %d failed to update ControllerRegistration %s due to %v", attempt, cur.Name, err)
+ }
+ return
+ })
+ if err != nil {
+ logger.Logger.Errorf("Failed to update ControllerRegistration %s after %d attempts due to %v", meta.Name, attempt, err)
+ }
+ return result, err
+}
+
+// TryUpdateControllerRegistrationWithEqualFunc tries to update the ControllerRegistration matching the given ObjectMeta.
+// It retries with the given characteristics as long as it gets Conflict errors.
+// The transformation function is applied to the current state of the ControllerRegistration object. If the equal
+// func concludes a semantically equal ControllerRegistration, no update is done and the operation returns normally.
+func TryUpdateControllerRegistrationWithEqualFunc(ctx context.Context, g gardencore.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*gardencorev1beta1.ControllerRegistration) (*gardencorev1beta1.ControllerRegistration, error), equal func(cur, updated *gardencorev1beta1.ControllerRegistration) bool) (*gardencorev1beta1.ControllerRegistration, error) {
+ return tryUpdateControllerRegistration(ctx, g, backoff, meta, transform, func(g gardencore.Interface, controllerRegistration *gardencorev1beta1.ControllerRegistration) (*gardencorev1beta1.ControllerRegistration, error) {
+ return g.CoreV1beta1().ControllerRegistrations().Update(ctx, controllerRegistration, kubernetes.DefaultUpdateOptions())
+ }, equal)
+}
+
+// TryUpdateControllerRegistration tries to update the ControllerRegistration matching the given ObjectMeta.
+// It retries with the given characteristics as long as it gets Conflict errors.
+// The transformation function is applied to the current state of the ControllerRegistration object. If the transformation
+// yields a semantically equal ControllerRegistration, no update is done and the operation returns normally.
+func TryUpdateControllerRegistration(ctx context.Context, g gardencore.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*gardencorev1beta1.ControllerRegistration) (*gardencorev1beta1.ControllerRegistration, error)) (*gardencorev1beta1.ControllerRegistration, error) {
+ return TryUpdateControllerRegistrationWithEqualFunc(ctx, g, backoff, meta, transform, func(cur, updated *gardencorev1beta1.ControllerRegistration) bool {
+ return equality.Semantic.DeepEqual(cur, updated)
+ })
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/daemonset.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/daemonset.go
new file mode 100644
index 0000000..f2364bb
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/daemonset.go
@@ -0,0 +1,102 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+)
+
+// DaemonSetSource is a function that produces a slice of DaemonSets or an error.
+type DaemonSetSource func() ([]*appsv1.DaemonSet, error)
+
+// DaemonSetLister is a lister of DaemonSets.
+type DaemonSetLister interface {
+ // List lists all DaemonSets that match the given selector.
+ List(selector labels.Selector) ([]*appsv1.DaemonSet, error)
+ // DaemonSets yields a DaemonSetNamespaceLister for the given namespace.
+ DaemonSets(namespace string) DaemonSetNamespaceLister
+}
+
+// DaemonSetNamespaceLister is a lister of DaemonSets for a specific namespace.
+type DaemonSetNamespaceLister interface {
+ // List lists all DaemonSets that match the given selector in the current namespace.
+ List(selector labels.Selector) ([]*appsv1.DaemonSet, error)
+ // Get retrieves the DaemonSet with the given name in the current namespace.
+ Get(name string) (*appsv1.DaemonSet, error)
+}
+
+type daemonSetLister struct {
+ source DaemonSetSource
+}
+
+type daemonSetNamespaceLister struct {
+ source DaemonSetSource
+ namespace string
+}
+
+// NewDaemonSetLister creates a new DaemonSetLister from the given DaemonSetSource.
+func NewDaemonSetLister(source DaemonSetSource) DaemonSetLister {
+ return &daemonSetLister{source: source}
+}
+
+func filterDaemonSets(source DaemonSetSource, filter func(*appsv1.DaemonSet) bool) ([]*appsv1.DaemonSet, error) {
+ daemonSets, err := source()
+ if err != nil {
+ return nil, err
+ }
+
+ var out []*appsv1.DaemonSet
+ for _, daemonSet := range daemonSets {
+ if filter(daemonSet) {
+ out = append(out, daemonSet)
+ }
+ }
+ return out, nil
+}
+
+func (d *daemonSetLister) List(selector labels.Selector) ([]*appsv1.DaemonSet, error) {
+ return filterDaemonSets(d.source, func(daemonSet *appsv1.DaemonSet) bool {
+ return selector.Matches(labels.Set(daemonSet.Labels))
+ })
+}
+
+func (d *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister {
+ return &daemonSetNamespaceLister{
+ source: d.source,
+ namespace: namespace,
+ }
+}
+
+func (d *daemonSetNamespaceLister) Get(name string) (*appsv1.DaemonSet, error) {
+ daemonSets, err := filterDaemonSets(d.source, func(daemonSet *appsv1.DaemonSet) bool {
+ return daemonSet.Namespace == d.namespace && daemonSet.Name == name
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if len(daemonSets) == 0 {
+ return nil, apierrors.NewNotFound(appsv1.Resource("DaemonSets"), name)
+ }
+ return daemonSets[0], nil
+}
+
+func (d *daemonSetNamespaceLister) List(selector labels.Selector) ([]*appsv1.DaemonSet, error) {
+ return filterDaemonSets(d.source, func(daemonSet *appsv1.DaemonSet) bool {
+ return daemonSet.Namespace == d.namespace && selector.Matches(labels.Set(daemonSet.Labels))
+ })
+}
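
For orientation (not part of the patch): the lister above is backed by a plain function, so it can be fed from any source; a static slice is enough for a sketch. The object names and labels are hypothetical.

package main

import (
	"fmt"

	kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A DaemonSetSource backed by a static slice.
	source := func() ([]*appsv1.DaemonSet, error) {
		return []*appsv1.DaemonSet{
			{ObjectMeta: metav1.ObjectMeta{
				Name:      "node-exporter",
				Namespace: "kube-system",
				Labels:    map[string]string{"app": "monitoring"},
			}},
		}, nil
	}

	lister := kutil.NewDaemonSetLister(source)
	matches, err := lister.DaemonSets("kube-system").List(labels.SelectorFromSet(labels.Set{"app": "monitoring"}))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(matches)) // 1
}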
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/deployment.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/deployment.go
new file mode 100644
index 0000000..c5f7840
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/deployment.go
@@ -0,0 +1,140 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "fmt"
+ "strings"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+
+ "github.com/Masterminds/semver"
+
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+)
+
+// ValidDeploymentContainerImageVersion checks whether the image tag of the named container in the given Deployment satisfies the given minimum version.
+func ValidDeploymentContainerImageVersion(deploymentToCheck *appsv1.Deployment, containerName, minimumVersion string) (bool, error) {
+ containers := deploymentToCheck.Spec.Template.Spec.Containers
+ getContainer := func(container string) (*corev1.Container, error) {
+ for _, container := range containers {
+ if container.Name == containerName {
+ return &container, nil
+ }
+ }
+ return nil, fmt.Errorf("container %q does not belong to this deployment", container)
+ }
+
+ containerToCheck, err := getContainer(containerName)
+ if err != nil {
+ return false, err
+ }
+ actualVersion, err := semver.NewVersion(strings.Split(containerToCheck.Image, ":")[1])
+ if err != nil {
+ return false, err
+ }
+ minVersion, err := semver.NewVersion(minimumVersion)
+ if err != nil {
+ return false, err
+ }
+ if actualVersion.LessThan(minVersion) {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// DeploymentSource is a function that produces a slice of Deployments or an error.
+type DeploymentSource func() ([]*appsv1.Deployment, error)
+
+// DeploymentLister is a lister of Deployments.
+type DeploymentLister interface {
+ // List lists all Deployments that match the given selector.
+ List(selector labels.Selector) ([]*appsv1.Deployment, error)
+ // Deployments yields a DeploymentNamespaceLister for the given namespace.
+ Deployments(namespace string) DeploymentNamespaceLister
+}
+
+// DeploymentNamespaceLister is a lister of deployments for a specific namespace.
+type DeploymentNamespaceLister interface {
+ // List lists all Deployments that match the given selector in the current namespace.
+ List(selector labels.Selector) ([]*appsv1.Deployment, error)
+ // Get retrieves the Deployment with the given name in the current namespace.
+ Get(name string) (*appsv1.Deployment, error)
+}
+
+type deploymentLister struct {
+ source DeploymentSource
+}
+
+type deploymentNamespaceLister struct {
+ source DeploymentSource
+ namespace string
+}
+
+// NewDeploymentLister creates a new DeploymentLister from the given DeploymentSource.
+func NewDeploymentLister(source DeploymentSource) DeploymentLister {
+ return &deploymentLister{source: source}
+}
+
+func filterDeployments(source DeploymentSource, filter func(*appsv1.Deployment) bool) ([]*appsv1.Deployment, error) {
+ deployments, err := source()
+ if err != nil {
+ return nil, err
+ }
+
+ var out []*appsv1.Deployment
+ for _, deployment := range deployments {
+ if filter(deployment) {
+ out = append(out, deployment)
+ }
+ }
+ return out, nil
+}
+
+func (d *deploymentLister) List(selector labels.Selector) ([]*appsv1.Deployment, error) {
+ return filterDeployments(d.source, func(deployment *appsv1.Deployment) bool {
+ return selector.Matches(labels.Set(deployment.Labels))
+ })
+}
+
+func (d *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister {
+ return &deploymentNamespaceLister{
+ source: d.source,
+ namespace: namespace,
+ }
+}
+
+func (d *deploymentNamespaceLister) Get(name string) (*appsv1.Deployment, error) {
+ deployments, err := filterDeployments(d.source, func(deployment *appsv1.Deployment) bool {
+ return deployment.Namespace == d.namespace && deployment.Name == name
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if len(deployments) == 0 {
+ return nil, apierrors.NewNotFound(appsv1.Resource("Deployments"), name)
+ }
+ return deployments[0], nil
+}
+
+func (d *deploymentNamespaceLister) List(selector labels.Selector) ([]*appsv1.Deployment, error) {
+ return filterDeployments(d.source, func(deployment *appsv1.Deployment) bool {
+ return deployment.Namespace == d.namespace && selector.Matches(labels.Set(deployment.Labels))
+ })
+}
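
For orientation (not part of the patch): a sketch of the version check above on an in-memory Deployment. The check splits the image reference on ':' and parses the tag as semver, so the image must carry a semver tag; the image name and versions here are hypothetical.

package main

import (
	"fmt"

	kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	deployment := &appsv1.Deployment{
		Spec: appsv1.DeploymentSpec{
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{Name: "vpn-seed", Image: "registry.example.com/vpn-seed:0.20.0"},
					},
				},
			},
		},
	}

	// false, nil: the tag 0.20.0 is below the required minimum 0.22.0.
	ok, err := kutil.ValidDeploymentContainerImageVersion(deployment, "vpn-seed", "0.22.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(ok)
}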
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/etcd.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/etcd.go
new file mode 100644
index 0000000..f28b576
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/etcd.go
@@ -0,0 +1,103 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"
+ appsv1 "k8s.io/api/apps/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+)
+
+// EtcdSource is a function that produces a slice of Etcds or an error.
+type EtcdSource func() ([]*druidv1alpha1.Etcd, error)
+
+// EtcdLister is a lister of Etcds.
+type EtcdLister interface {
+ // List lists all Etcds that match the given selector.
+ List(selector labels.Selector) ([]*druidv1alpha1.Etcd, error)
+ // Etcds yields a EtcdNamespaceLister for the given namespace.
+ Etcds(namespace string) EtcdNamespaceLister
+}
+
+// EtcdNamespaceLister is a lister of etcds for a specific namespace.
+type EtcdNamespaceLister interface {
+ // List lists all Etcds that match the given selector in the current namespace.
+ List(selector labels.Selector) ([]*druidv1alpha1.Etcd, error)
+ // Get retrieves the Etcd with the given name in the current namespace.
+ Get(name string) (*druidv1alpha1.Etcd, error)
+}
+
+type etcdLister struct {
+ source EtcdSource
+}
+
+type etcdNamespaceLister struct {
+ source EtcdSource
+ namespace string
+}
+
+// NewEtcdLister creates a new EtcdLister from the given EtcdSource.
+func NewEtcdLister(source EtcdSource) EtcdLister {
+ return &etcdLister{source: source}
+}
+
+func filterEtcds(source EtcdSource, filter func(*druidv1alpha1.Etcd) bool) ([]*druidv1alpha1.Etcd, error) {
+ etcds, err := source()
+ if err != nil {
+ return nil, err
+ }
+
+ var out []*druidv1alpha1.Etcd
+ for _, etcd := range etcds {
+ if filter(etcd) {
+ out = append(out, etcd)
+ }
+ }
+ return out, nil
+}
+
+func (d *etcdLister) List(selector labels.Selector) ([]*druidv1alpha1.Etcd, error) {
+ return filterEtcds(d.source, func(node *druidv1alpha1.Etcd) bool {
+ return selector.Matches(labels.Set(node.Labels))
+ })
+}
+
+func (d *etcdLister) Etcds(namespace string) EtcdNamespaceLister {
+ return &etcdNamespaceLister{
+ source: d.source,
+ namespace: namespace,
+ }
+}
+
+func (d *etcdNamespaceLister) Get(name string) (*druidv1alpha1.Etcd, error) {
+ etcds, err := filterEtcds(d.source, func(etcd *druidv1alpha1.Etcd) bool {
+ return etcd.Namespace == d.namespace && etcd.Name == name
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if len(etcds) == 0 {
+ return nil, apierrors.NewNotFound(appsv1.Resource("Etcds"), name)
+ }
+ return etcds[0], nil
+}
+
+func (d *etcdNamespaceLister) List(selector labels.Selector) ([]*druidv1alpha1.Etcd, error) {
+ return filterEtcds(d.source, func(etcd *druidv1alpha1.Etcd) bool {
+ return etcd.Namespace == d.namespace && selector.Matches(labels.Set(etcd.Labels))
+ })
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/and.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/and.go
new file mode 100644
index 0000000..efe6c02
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/and.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package health
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// Func is a type for a function that checks the health of a runtime.Object.
+type Func func(runtime.Object) error
+
+// And combines multiple health check funcs into a single func, checking all funcs sequentially and returning the first
+// error that occurs, or nil if no error occurs.
+func And(fns ...Func) Func {
+ return func(o runtime.Object) error {
+ for _, fn := range fns {
+ if err := fn(o); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
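
For orientation (not part of the patch): And composes checks that share the Func signature, so it combines naturally with the extension helpers defined in health.go below. A sketch, checking an empty (and therefore unhealthy) Extension object:

package main

import (
	"fmt"

	extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
	"github.com/gardener/gardener/pkg/utils/kubernetes/health"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The combined func runs the checks in order and returns the first error.
	check := health.And(
		health.CheckExtensionObject,
		health.ExtensionOperationHasBeenUpdatedSince(metav1.Now()),
	)

	// Fails the first check: the empty object has not recorded a last operation yet.
	err := check(&extensionsv1alpha1.Extension{})
	fmt.Println(err)
}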
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/health.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/health.go
new file mode 100644
index 0000000..283456b
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/health.go
@@ -0,0 +1,425 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package health
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"
+ resourcesv1alpha1 "github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/client-go/rest"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
+ gardencorev1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper"
+ extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
+ "github.com/gardener/gardener/pkg/utils"
+ "github.com/sirupsen/logrus"
+)
+
+func requiredConditionMissing(conditionType string) error {
+ return fmt.Errorf("condition %q is missing", conditionType)
+}
+
+func checkConditionState(conditionType string, expected, actual, reason, message string) error {
+ if expected != actual {
+ return fmt.Errorf("condition %q has invalid status %s (expected %s) due to %s: %s",
+ conditionType, actual, expected, reason, message)
+ }
+ return nil
+}
+
+func getDeploymentCondition(conditions []appsv1.DeploymentCondition, conditionType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition {
+ for _, condition := range conditions {
+ if condition.Type == conditionType {
+ return &condition
+ }
+ }
+ return nil
+}
+
+func getNodeCondition(conditions []corev1.NodeCondition, conditionType corev1.NodeConditionType) *corev1.NodeCondition {
+ for _, condition := range conditions {
+ if condition.Type == conditionType {
+ return &condition
+ }
+ }
+ return nil
+}
+
+var (
+ trueDeploymentConditionTypes = []appsv1.DeploymentConditionType{
+ appsv1.DeploymentAvailable,
+ }
+
+ trueOptionalDeploymentConditionTypes = []appsv1.DeploymentConditionType{
+ appsv1.DeploymentProgressing,
+ }
+
+ falseOptionalDeploymentConditionTypes = []appsv1.DeploymentConditionType{
+ appsv1.DeploymentReplicaFailure,
+ }
+)
+
+// CheckDeployment checks whether the given Deployment is healthy.
+// A Deployment is considered healthy if its controller observed its current generation and
+// if its required conditions (e.g. Available) report a healthy state.
+func CheckDeployment(deployment *appsv1.Deployment) error {
+ if deployment.Status.ObservedGeneration < deployment.Generation {
+ return fmt.Errorf("observed generation outdated (%d/%d)", deployment.Status.ObservedGeneration, deployment.Generation)
+ }
+
+ for _, trueConditionType := range trueDeploymentConditionTypes {
+ conditionType := string(trueConditionType)
+ condition := getDeploymentCondition(deployment.Status.Conditions, trueConditionType)
+ if condition == nil {
+ return requiredConditionMissing(conditionType)
+ }
+ if err := checkConditionState(conditionType, string(corev1.ConditionTrue), string(condition.Status), condition.Reason, condition.Message); err != nil {
+ return err
+ }
+ }
+
+ for _, trueOptionalConditionType := range trueOptionalDeploymentConditionTypes {
+ conditionType := string(trueOptionalConditionType)
+ condition := getDeploymentCondition(deployment.Status.Conditions, trueOptionalConditionType)
+ if condition == nil {
+ continue
+ }
+ if err := checkConditionState(conditionType, string(corev1.ConditionTrue), string(condition.Status), condition.Reason, condition.Message); err != nil {
+ return err
+ }
+ }
+
+ for _, falseOptionalConditionType := range falseOptionalDeploymentConditionTypes {
+ conditionType := string(falseOptionalConditionType)
+ condition := getDeploymentCondition(deployment.Status.Conditions, falseOptionalConditionType)
+ if condition == nil {
+ continue
+ }
+ if err := checkConditionState(conditionType, string(corev1.ConditionFalse), string(condition.Status), condition.Reason, condition.Message); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// CheckStatefulSet checks whether the given StatefulSet is healthy.
+// A StatefulSet is considered healthy if its controller observed its current generation
+// and if the number of ready replicas reported in its status is at least equal to the
+// desired number of replicas.
+func CheckStatefulSet(statefulSet *appsv1.StatefulSet) error {
+ if statefulSet.Status.ObservedGeneration < statefulSet.Generation {
+ return fmt.Errorf("observed generation outdated (%d/%d)", statefulSet.Status.ObservedGeneration, statefulSet.Generation)
+ }
+
+ replicas := int32(1)
+ if statefulSet.Spec.Replicas != nil {
+ replicas = *statefulSet.Spec.Replicas
+ }
+
+ if statefulSet.Status.ReadyReplicas < replicas {
+ return fmt.Errorf("not enough ready replicas (%d/%d)", statefulSet.Status.ReadyReplicas, replicas)
+ }
+ return nil
+}
+
+// CheckEtcd checks whether the given Etcd is healthy.
+// An Etcd is considered healthy if the ready field in its status is true.
+func CheckEtcd(etcd *druidv1alpha1.Etcd) error {
+ if !utils.IsTrue(etcd.Status.Ready) {
+ return fmt.Errorf("etcd %s is not ready yet", etcd.Name)
+ }
+ return nil
+}
+
+func daemonSetMaxUnavailable(daemonSet *appsv1.DaemonSet) int32 {
+ if daemonSet.Status.DesiredNumberScheduled == 0 || daemonSet.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType {
+ return 0
+ }
+
+ rollingUpdate := daemonSet.Spec.UpdateStrategy.RollingUpdate
+ if rollingUpdate == nil {
+ return 0
+ }
+
+ maxUnavailable, err := intstr.GetValueFromIntOrPercent(rollingUpdate.MaxUnavailable, int(daemonSet.Status.DesiredNumberScheduled), false)
+ if err != nil {
+ return 0
+ }
+
+ return int32(maxUnavailable)
+}
+
+// CheckDaemonSet checks whether the given DaemonSet is healthy.
+// A DaemonSet is considered healthy if its controller observed its current generation and if
+// the number of currently scheduled pods is not lower than the desired number minus the allowed maxUnavailable.
+func CheckDaemonSet(daemonSet *appsv1.DaemonSet) error {
+ if daemonSet.Status.ObservedGeneration < daemonSet.Generation {
+ return fmt.Errorf("observed generation outdated (%d/%d)", daemonSet.Status.ObservedGeneration, daemonSet.Generation)
+ }
+
+ maxUnavailable := daemonSetMaxUnavailable(daemonSet)
+
+ if requiredAvailable := daemonSet.Status.DesiredNumberScheduled - maxUnavailable; daemonSet.Status.CurrentNumberScheduled < requiredAvailable {
+ return fmt.Errorf("not enough available replicas (%d/%d)", daemonSet.Status.CurrentNumberScheduled, requiredAvailable)
+ }
+ return nil
+}
+
+// NodeOutOfDisk is a deprecated NodeConditionType.
+// It is no longer reported by kubelet >= 1.13. See https://github.com/kubernetes/kubernetes/pull/70111.
+// +deprecated
+const NodeOutOfDisk = "OutOfDisk"
+
+var (
+ trueNodeConditionTypes = []corev1.NodeConditionType{
+ corev1.NodeReady,
+ }
+
+ falseNodeConditionTypes = []corev1.NodeConditionType{
+ corev1.NodeDiskPressure,
+ corev1.NodeMemoryPressure,
+ corev1.NodeNetworkUnavailable,
+ corev1.NodePIDPressure,
+ NodeOutOfDisk,
+ }
+)
+
+// CheckNode checks whether the given Node is healthy.
+// A node is considered healthy if it has a `corev1.NodeReady` condition and this condition reports
+// `corev1.ConditionTrue`.
+func CheckNode(node *corev1.Node) error {
+ for _, trueConditionType := range trueNodeConditionTypes {
+ conditionType := string(trueConditionType)
+ condition := getNodeCondition(node.Status.Conditions, trueConditionType)
+ if condition == nil {
+ return requiredConditionMissing(conditionType)
+ }
+ if err := checkConditionState(conditionType, string(corev1.ConditionTrue), string(condition.Status), condition.Reason, condition.Message); err != nil {
+ return err
+ }
+ }
+
+ for _, falseConditionType := range falseNodeConditionTypes {
+ conditionType := string(falseConditionType)
+ condition := getNodeCondition(node.Status.Conditions, falseConditionType)
+ if condition == nil {
+ continue
+ }
+ if err := checkConditionState(conditionType, string(corev1.ConditionFalse), string(condition.Status), condition.Reason, condition.Message); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+var (
+ trueSeedConditionTypes = []gardencorev1beta1.ConditionType{
+ gardencorev1beta1.SeedGardenletReady,
+ gardencorev1beta1.SeedBootstrapped,
+ }
+)
+
+// CheckSeed checks if the Seed is up-to-date and if its extensions have been successfully bootstrapped.
+func CheckSeed(seed *gardencorev1beta1.Seed, identity *gardencorev1beta1.Gardener) error {
+ if !apiequality.Semantic.DeepEqual(seed.Status.Gardener, identity) {
+ return fmt.Errorf("observing Gardener version not up to date (%v/%v)", seed.Status.Gardener, identity)
+ }
+
+ return checkSeed(seed, identity)
+}
+
+// CheckSeedForMigration checks if the Seed is up-to-date (comparing only the versions) and if its extensions have been successfully bootstrapped.
+func CheckSeedForMigration(seed *gardencorev1beta1.Seed, identity *gardencorev1beta1.Gardener) error {
+ if seed.Status.Gardener.Version != identity.Version {
+ return fmt.Errorf("observing Gardener version not up to date (%s/%s)", seed.Status.Gardener.Version, identity.Version)
+ }
+
+ return checkSeed(seed, identity)
+}
+
+// checkSeed checks if the seed.Status.ObservedGeneration is not outdated and if its extensions have been successfully bootstrapped.
+func checkSeed(seed *gardencorev1beta1.Seed, identity *gardencorev1beta1.Gardener) error {
+ if seed.Status.ObservedGeneration < seed.Generation {
+ return fmt.Errorf("observed generation outdated (%d/%d)", seed.Status.ObservedGeneration, seed.Generation)
+ }
+
+ for _, trueConditionType := range trueSeedConditionTypes {
+ conditionType := string(trueConditionType)
+ condition := gardencorev1beta1helper.GetCondition(seed.Status.Conditions, trueConditionType)
+ if condition == nil {
+ return requiredConditionMissing(conditionType)
+ }
+ if err := checkConditionState(conditionType, string(gardencorev1beta1.ConditionTrue), string(condition.Status), condition.Reason, condition.Message); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// CheckExtensionObject checks if an extension Object is healthy or not.
+// An extension object is healthy if
+// * Its observed generation is up-to-date
+// * No gardener.cloud/operation is set
+// * No lastError is in the status
+// * A last operation in state Succeeded is present
+func CheckExtensionObject(o runtime.Object) error {
+ obj, ok := o.(extensionsv1alpha1.Object)
+ if !ok {
+ return fmt.Errorf("expected extensionsv1alpha1.Object but got %T", o)
+ }
+
+ status := obj.GetExtensionStatus()
+ return checkExtensionObject(obj.GetGeneration(), status.GetObservedGeneration(), obj.GetAnnotations(), status.GetLastError(), status.GetLastOperation())
+}
+
+// ExtensionOperationHasBeenUpdatedSince returns a health check function that checks if an extension Object's last
+// operation has been updated since `lastUpdateTime`.
+func ExtensionOperationHasBeenUpdatedSince(lastUpdateTime metav1.Time) Func {
+ return func(o runtime.Object) error {
+ obj, ok := o.(extensionsv1alpha1.Object)
+ if !ok {
+ return fmt.Errorf("expected extensionsv1alpha1.Object but got %T", o)
+ }
+
+ lastOperation := obj.GetExtensionStatus().GetLastOperation()
+ if lastOperation == nil || !lastOperation.LastUpdateTime.After(lastUpdateTime.Time) {
+ return fmt.Errorf("extension operation was not updated yet")
+ }
+ return nil
+ }
+}
+
+// CheckBackupBucket checks if a BackupBucket object is healthy or not.
+func CheckBackupBucket(bb runtime.Object) error {
+ obj, ok := bb.(*gardencorev1beta1.BackupBucket)
+ if !ok {
+ return fmt.Errorf("expected gardencorev1beta1.BackupBucket but got %T", bb)
+ }
+ return checkExtensionObject(obj.Generation, obj.Status.ObservedGeneration, obj.Annotations, obj.Status.LastError, obj.Status.LastOperation)
+}
+
+// checkExtensionObject checks if an extension Object is healthy or not.
+func checkExtensionObject(generation int64, observedGeneration int64, annotations map[string]string, lastError *gardencorev1beta1.LastError, lastOperation *gardencorev1beta1.LastOperation) error {
+ if lastError != nil {
+ return gardencorev1beta1helper.NewErrorWithCodes(fmt.Sprintf("extension encountered error during reconciliation: %s", lastError.Description), lastError.Codes...)
+ }
+
+ if observedGeneration != generation {
+ return fmt.Errorf("observed generation outdated (%d/%d)", observedGeneration, generation)
+ }
+
+ if op, ok := annotations[v1beta1constants.GardenerOperation]; ok {
+ return fmt.Errorf("gardener operation %q is not yet picked up by controller", op)
+ }
+
+ if lastOperation == nil {
+ return fmt.Errorf("extension did not record a last operation yet")
+ }
+
+ if lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded {
+ return fmt.Errorf("extension state is not succeeded but %v", lastOperation.State)
+ }
+
+ return nil
+}
+
+// Now determines the current time.
+var Now = time.Now
+
+// conditionerFunc is a function that updates a condition with the given type and message.
+type conditionerFunc func(conditionType string, message string) gardencorev1beta1.Condition
+
+// CheckAPIServerAvailability checks if the API server of a cluster is reachable and measure the response time.
+func CheckAPIServerAvailability(ctx context.Context, condition gardencorev1beta1.Condition, restClient rest.Interface, conditioner conditionerFunc, log logrus.FieldLogger) gardencorev1beta1.Condition {
+ now := Now()
+ response := restClient.Get().AbsPath("/healthz").Do(ctx)
+ responseDurationText := fmt.Sprintf("[response_time:%dms]", Now().Sub(now).Nanoseconds()/time.Millisecond.Nanoseconds())
+ if response.Error() != nil {
+ message := fmt.Sprintf("Request to API server /healthz endpoint failed. %s (%s)", responseDurationText, response.Error().Error())
+ return conditioner("HealthzRequestFailed", message)
+ }
+
+ // Determine the status code of the response.
+ var statusCode int
+ response.StatusCode(&statusCode)
+
+ if statusCode != http.StatusOK {
+ var body string
+ bodyRaw, err := response.Raw()
+ if err != nil {
+ body = fmt.Sprintf("Could not parse response body: %s", err.Error())
+ } else {
+ body = string(bodyRaw)
+ }
+ message := fmt.Sprintf("API server /healthz endpoint check returned a non ok status code %d. (%s)", statusCode, body)
+ log.Error(message)
+ return conditioner("HealthzRequestError", message)
+ }
+
+ message := "API server /healthz endpoint responded with success status code."
+ return gardencorev1beta1helper.UpdatedCondition(condition, gardencorev1beta1.ConditionTrue, "HealthzRequestSucceeded", message)
+}
+
+var (
+ trueManagedResourceConditionTypes = []resourcesv1alpha1.ConditionType{
+ resourcesv1alpha1.ResourcesApplied,
+ resourcesv1alpha1.ResourcesHealthy,
+ }
+)
+
+// CheckManagedResource checks whether the given ManagedResource is healthy.
+// A ManagedResource is considered healthy if its controller observed its current revision,
+// and if the required conditions are healthy.
+func CheckManagedResource(managedResource *resourcesv1alpha1.ManagedResource) error {
+ if managedResource.Status.ObservedGeneration < managedResource.Generation {
+ return fmt.Errorf("observed generation outdated (%d/%d)", managedResource.Status.ObservedGeneration, managedResource.Generation)
+ }
+
+ for _, trueConditionType := range trueManagedResourceConditionTypes {
+ conditionType := string(trueConditionType)
+ condition := getManagedResourceCondition(managedResource.Status.Conditions, trueConditionType)
+ if condition == nil {
+ return requiredConditionMissing(conditionType)
+ }
+ if err := checkConditionState(conditionType, string(corev1.ConditionTrue), string(condition.Status), condition.Reason, condition.Message); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func getManagedResourceCondition(conditions []resourcesv1alpha1.ManagedResourceCondition, conditionType resourcesv1alpha1.ConditionType) *resourcesv1alpha1.ManagedResourceCondition {
+ for _, condition := range conditions {
+ if condition.Type == conditionType {
+ return &condition
+ }
+ }
+ return nil
+}
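
For orientation (not part of the patch): the checks above operate on plain API objects, so they can be exercised without a cluster. A sketch with an in-memory Deployment:

package main

import (
	"fmt"

	"github.com/gardener/gardener/pkg/utils/kubernetes/health"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	deployment := &appsv1.Deployment{
		Status: appsv1.DeploymentStatus{
			Conditions: []appsv1.DeploymentCondition{
				{Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue},
			},
		},
	}

	// nil: the generation is observed and the Available condition is true.
	fmt.Println(health.CheckDeployment(deployment))

	// Non-nil: the Available condition no longer reports True.
	deployment.Status.Conditions[0].Status = corev1.ConditionFalse
	fmt.Println(health.CheckDeployment(deployment))
}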
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/pod_health.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/pod_health.go
new file mode 100644
index 0000000..dbafa0b
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/pod_health.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copied from https://github.com/kubernetes/kubernetes/blob/a93f803f8e400f1d42dc812bc51932ff3b31798a/pkg/api/pod/util.go#L181-L211
+
+package health
+
+import (
+ corev1 "k8s.io/api/core/v1"
+)
+
+// IsPodReady returns true if a pod is ready; false otherwise.
+func IsPodReady(pod *corev1.Pod) bool {
+ return IsPodReadyConditionTrue(pod.Status)
+}
+
+// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise.
+func IsPodReadyConditionTrue(status corev1.PodStatus) bool {
+ condition := GetPodReadyCondition(status)
+ return condition != nil && condition.Status == corev1.ConditionTrue
+}
+
+// GetPodReadyCondition extracts the pod ready condition from the given status and returns that.
+// Returns nil if the condition is not present.
+func GetPodReadyCondition(status corev1.PodStatus) *corev1.PodCondition {
+ _, condition := GetPodCondition(&status, corev1.PodReady)
+ return condition
+}
+
+// GetPodCondition extracts the provided condition from the given status and returns its index and the condition itself.
+// Returns -1 and nil if the condition is not present.
+func GetPodCondition(status *corev1.PodStatus, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) {
+ if status == nil {
+ return -1, nil
+ }
+ for i := range status.Conditions {
+ if status.Conditions[i].Type == conditionType {
+ return i, &status.Conditions[i]
+ }
+ }
+ return -1, nil
+}
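
For orientation (not part of the patch): a tiny sketch of the pod readiness helpers above on an in-memory Pod.

package main

import (
	"fmt"

	"github.com/gardener/gardener/pkg/utils/kubernetes/health"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	pod := &corev1.Pod{
		Status: corev1.PodStatus{
			Conditions: []corev1.PodCondition{
				{Type: corev1.PodReady, Status: corev1.ConditionTrue},
			},
		},
	}

	fmt.Println(health.IsPodReady(pod)) // true

	idx, cond := health.GetPodCondition(&pod.Status, corev1.PodReady)
	fmt.Println(idx, cond.Status) // 0 True
}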
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/kubernetes.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/kubernetes.go
new file mode 100644
index 0000000..71bf959
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/kubernetes.go
@@ -0,0 +1,539 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/gardener/gardener/pkg/client/kubernetes"
+ "github.com/gardener/gardener/pkg/utils/retry"
+
+ "github.com/sirupsen/logrus"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/duration"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+)
+
+// TruncateLabelValue truncates a string at 63 characters so it's suitable for a label value.
+func TruncateLabelValue(s string) string {
+ if len(s) > 63 {
+ return s[:63]
+ }
+ return s
+}
+
+// SetMetaDataLabel sets the key value pair in the labels section of the given Object.
+// If the given Object did not yet have labels, they are initialized.
+func SetMetaDataLabel(meta metav1.Object, key, value string) {
+ labels := meta.GetLabels()
+ if labels == nil {
+ labels = make(map[string]string)
+ }
+ labels[key] = value
+ meta.SetLabels(labels)
+}
+
+// SetMetaDataAnnotation sets the annotation on the given object.
+// If the given Object did not yet have annotations, they are initialized.
+func SetMetaDataAnnotation(meta metav1.Object, key, value string) {
+ annotations := meta.GetAnnotations()
+ if annotations == nil {
+ annotations = make(map[string]string)
+ }
+ annotations[key] = value
+ meta.SetAnnotations(annotations)
+}
+
+// HasMetaDataAnnotation checks if the passed meta object has the given key, value set in the annotations section.
+func HasMetaDataAnnotation(meta metav1.Object, key, value string) bool {
+ val, ok := meta.GetAnnotations()[key]
+ return ok && val == value
+}
+
+// HasDeletionTimestamp checks if an object has a deletion timestamp
+func HasDeletionTimestamp(obj runtime.Object) (bool, error) {
+ metadata, err := meta.Accessor(obj)
+ if err != nil {
+ return false, err
+ }
+ return metadata.GetDeletionTimestamp() != nil, nil
+}
+
+func nameAndNamespace(namespaceOrName string, nameOpt ...string) (namespace, name string) {
+ if len(nameOpt) > 1 {
+		panic(fmt.Sprintf("more than one name specified for key: %s/%v", namespaceOrName, nameOpt))
+ }
+ if len(nameOpt) == 0 {
+ name = namespaceOrName
+ return
+ }
+ namespace = namespaceOrName
+ name = nameOpt[0]
+ return
+}
+
+// Key creates a new client.ObjectKey from the given parameters.
+// There are only two ways to call this function:
+// - If only namespaceOrName is set, then a client.ObjectKey with name set to namespaceOrName is returned.
+// - If namespaceOrName and one nameOpt is given, then a client.ObjectKey with namespace set to namespaceOrName
+// and name set to nameOpt[0] is returned.
+// For all other cases, this method panics.
+func Key(namespaceOrName string, nameOpt ...string) client.ObjectKey {
+ namespace, name := nameAndNamespace(namespaceOrName, nameOpt...)
+ return client.ObjectKey{Namespace: namespace, Name: name}
+}
+
+// KeyFromObject obtains the client.ObjectKey from the given metav1.Object.
+// Deprecated: use client.ObjectKeyFromObject instead.
+func KeyFromObject(obj metav1.Object) client.ObjectKey {
+ return Key(obj.GetNamespace(), obj.GetName())
+}
+
+// ObjectMeta creates a new metav1.ObjectMeta from the given parameters.
+// There are only two ways to call this function:
+// - If only namespaceOrName is set, then a metav1.ObjectMeta with name set to namespaceOrName is returned.
+// - If namespaceOrName and one nameOpt is given, then a metav1.ObjectMeta with namespace set to namespaceOrName
+// and name set to nameOpt[0] is returned.
+// For all other cases, this method panics.
+func ObjectMeta(namespaceOrName string, nameOpt ...string) metav1.ObjectMeta {
+ namespace, name := nameAndNamespace(namespaceOrName, nameOpt...)
+ return metav1.ObjectMeta{Namespace: namespace, Name: name}
+}
+
+// ObjectMetaFromKey returns an ObjectMeta with the namespace and name set to the values from the key.
+func ObjectMetaFromKey(key client.ObjectKey) metav1.ObjectMeta {
+ return ObjectMeta(key.Namespace, key.Name)
+}
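+
+// Example (illustrative sketch): the two supported call patterns for Key;
+// "my-cluster", "garden" and "my-secret" are assumed example values.
+func exampleKeys() (client.ObjectKey, client.ObjectKey) {
+	clusterScoped := Key("my-cluster")       // name only, no namespace
+	namespaced := Key("garden", "my-secret") // namespace and name
+	return clusterScoped, namespaced
+}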
+
+// WaitUntilResourceDeleted waits until the given resource has been deleted. It polls with the
+// given interval and respects the deadline of the passed context.
+func WaitUntilResourceDeleted(ctx context.Context, c client.Client, obj client.Object, interval time.Duration) error {
+ key := client.ObjectKeyFromObject(obj)
+ return retry.Until(ctx, interval, func(ctx context.Context) (done bool, err error) {
+ if err := c.Get(ctx, key, obj); err != nil {
+ if apierrors.IsNotFound(err) {
+ return retry.Ok()
+ }
+ return retry.SevereError(err)
+ }
+ return retry.MinorError(fmt.Errorf("resource %s still exists", key.String()))
+ })
+}
+
+// WaitUntilResourcesDeleted waits until the given resources are gone.
+// It respects the given interval and timeout.
+func WaitUntilResourcesDeleted(ctx context.Context, c client.Client, list client.ObjectList, interval time.Duration, opts ...client.ListOption) error {
+ return retry.Until(ctx, interval, func(ctx context.Context) (done bool, err error) {
+ if err := c.List(ctx, list, opts...); err != nil {
+ return retry.SevereError(err)
+ }
+ if meta.LenList(list) == 0 {
+ return retry.Ok()
+ }
+ var remainingItems []string
+ acc := meta.NewAccessor()
+ if err := meta.EachListItem(list, func(remainingObj runtime.Object) error {
+ name, err := acc.Name(remainingObj)
+ if err != nil {
+ return err
+ }
+ remainingItems = append(remainingItems, name)
+ return nil
+ }); err != nil {
+ return retry.SevereError(err)
+ }
+ return retry.MinorError(fmt.Errorf("resource(s) %s still exists", remainingItems))
+ })
+}
+
+// WaitUntilResourceDeletedWithDefaults waits until the given resource has been deleted. It
+// uses a default interval of 5 seconds and a default timeout of 10 minutes.
+func WaitUntilResourceDeletedWithDefaults(ctx context.Context, c client.Client, obj client.Object) error {
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
+ defer cancel()
+
+ return WaitUntilResourceDeleted(ctx, c, obj, 5*time.Second)
+}
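+
+// Example (illustrative sketch): issue a delete and then block until the object is gone,
+// using the wait helper above; the 5-second poll interval is an assumed example value.
+func exampleDeleteAndWait(ctx context.Context, c client.Client, obj client.Object) error {
+	if err := c.Delete(ctx, obj); err != nil && !apierrors.IsNotFound(err) {
+		return err
+	}
+	return WaitUntilResourceDeleted(ctx, c, obj, 5*time.Second)
+}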
+
+// WaitUntilLoadBalancerIsReady waits until the given external load balancer has
+// been created (i.e., its ingress information has been updated in the service status).
+func WaitUntilLoadBalancerIsReady(ctx context.Context, kubeClient kubernetes.Interface, namespace, name string, timeout time.Duration, logger *logrus.Entry) (string, error) {
+ var loadBalancerIngress string
+ if err := retry.UntilTimeout(ctx, 5*time.Second, timeout, func(ctx context.Context) (done bool, err error) {
+ loadBalancerIngress, err = GetLoadBalancerIngress(ctx, kubeClient.Client(), namespace, name)
+ if err != nil {
+ logger.Infof("Waiting until the %s service deployed is ready...", name)
+ // TODO(AC): This is a quite optimistic check / we should differentiate here
+ return retry.MinorError(fmt.Errorf("%s service deployed is not ready: %v", name, err))
+ }
+ return retry.Ok()
+ }); err != nil {
+ fieldSelector := client.MatchingFields{
+ "involvedObject.kind": "Service",
+ "involvedObject.name": name,
+ "involvedObject.namespace": namespace,
+ "type": corev1.EventTypeWarning,
+ }
+ eventList := &corev1.EventList{}
+ if err2 := kubeClient.DirectClient().List(ctx, eventList, fieldSelector); err2 != nil {
+			return "", fmt.Errorf("error '%v' occurred while fetching more details on error '%v'", err2, err)
+ }
+
+ if len(eventList.Items) > 0 {
+ eventsErrorMessage := buildEventsErrorMessage(eventList.Items)
+ errorMessage := err.Error() + "\n\n" + eventsErrorMessage
+ return "", errors.New(errorMessage)
+ }
+
+ return "", err
+ }
+
+ return loadBalancerIngress, nil
+}
+
+// GetLoadBalancerIngress takes a context, a client, a namespace and a service name. It queries for a load balancer's technical name
+// (IP address or hostname). It returns that technical name, whereby it always prefers the hostname (if given)
+// over the IP address.
+func GetLoadBalancerIngress(ctx context.Context, client client.Client, namespace, name string) (string, error) {
+ service := &corev1.Service{}
+ if err := client.Get(ctx, Key(namespace, name), service); err != nil {
+ return "", err
+ }
+
+ var (
+ serviceStatusIngress = service.Status.LoadBalancer.Ingress
+ length = len(serviceStatusIngress)
+ )
+
+ switch {
+ case length == 0:
+ return "", errors.New("`.status.loadBalancer.ingress[]` has no elements yet, i.e. external load balancer has not been created")
+ case serviceStatusIngress[length-1].Hostname != "":
+ return serviceStatusIngress[length-1].Hostname, nil
+ case serviceStatusIngress[length-1].IP != "":
+ return serviceStatusIngress[length-1].IP, nil
+ }
+
+	return "", errors.New("`.status.loadBalancer.ingress[]` has an element which contains neither `.ip` nor `.hostname`")
+}
+
+// LookupObject retrieves an object for the given object key, dealing with a potentially stale cache that does not yet contain the object.
+// It first tries to retrieve the object using the given cached client.
+// If the object key is not found, it does a live lookup from the API server using the given apiReader.
+func LookupObject(ctx context.Context, c client.Client, apiReader client.Reader, key client.ObjectKey, obj client.Object) error {
+ err := c.Get(ctx, key, obj)
+ if err == nil {
+ return nil
+ }
+ if !apierrors.IsNotFound(err) {
+ return err
+ }
+
+ // Try to get the obj, now by doing a live lookup instead of relying on the cache.
+ return apiReader.Get(ctx, key, obj)
+}
+
+// FeatureGatesToCommandLineParameter transforms feature gates given as string/bool map to a command line parameter that
+// is understood by Kubernetes components.
+func FeatureGatesToCommandLineParameter(fg map[string]bool) string {
+ if len(fg) == 0 {
+ return ""
+ }
+
+ keys := make([]string, 0, len(fg))
+ for k := range fg {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ out := "--feature-gates="
+ for _, key := range keys {
+ out += fmt.Sprintf("%s=%s,", key, strconv.FormatBool(fg[key]))
+ }
+ return out
+}
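+
+// Example (illustrative sketch): with the map below the function returns
+// "--feature-gates=BarGate=false,FooGate=true," (keys sorted alphabetically, trailing comma
+// included); the gate names are assumed example values.
+func exampleFeatureGateFlag() string {
+	return FeatureGatesToCommandLineParameter(map[string]bool{
+		"FooGate": true,
+		"BarGate": false,
+	})
+}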
+
+// ReconcileServicePorts reconciles the existing service ports with the desired ports. This means that it takes the
+// existing port (identified by name), and applies the settings from the desired port to it. This way it can keep fields
+// that are defaulted by controllers, e.g. the node port. However, it does not keep ports that are not part of the
+// desired list.
+func ReconcileServicePorts(existingPorts []corev1.ServicePort, desiredPorts []corev1.ServicePort) []corev1.ServicePort {
+ var out []corev1.ServicePort
+
+ for _, desiredPort := range desiredPorts {
+ var port corev1.ServicePort
+
+ for _, existingPort := range existingPorts {
+ if existingPort.Name == desiredPort.Name {
+ port = existingPort
+ break
+ }
+ }
+
+ port.Name = desiredPort.Name
+ if len(desiredPort.Protocol) > 0 {
+ port.Protocol = desiredPort.Protocol
+ }
+ if desiredPort.Port != 0 {
+ port.Port = desiredPort.Port
+ }
+ if desiredPort.TargetPort.Type == intstr.Int || desiredPort.TargetPort.Type == intstr.String {
+ port.TargetPort = desiredPort.TargetPort
+ }
+ if desiredPort.NodePort != 0 {
+ port.NodePort = desiredPort.NodePort
+ }
+
+ out = append(out, port)
+ }
+
+ return out
+}
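+
+// Example (illustrative sketch): reconcile a single desired HTTP port against the ports
+// currently set on a service, keeping a controller-assigned node port if one exists; the
+// port name and numbers are assumed example values.
+func exampleReconcilePorts(existing []corev1.ServicePort) []corev1.ServicePort {
+	desired := []corev1.ServicePort{{
+		Name:       "http",
+		Protocol:   corev1.ProtocolTCP,
+		Port:       80,
+		TargetPort: intstr.FromInt(8080),
+	}}
+	return ReconcileServicePorts(existing, desired)
+}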
+
+func buildEventsErrorMessage(events []corev1.Event) string {
+ sortByLastTimestamp := func(o1, o2 controllerutil.Object) bool {
+ obj1, ok1 := o1.(*corev1.Event)
+ obj2, ok2 := o2.(*corev1.Event)
+
+ if !ok1 || !ok2 {
+ return false
+ }
+
+ return obj1.LastTimestamp.Time.Before(obj2.LastTimestamp.Time)
+ }
+
+ list := &corev1.EventList{Items: events}
+ SortBy(sortByLastTimestamp).Sort(list)
+ events = list.Items
+
+ const eventsLimit = 2
+ if len(events) > eventsLimit {
+ events = events[len(events)-eventsLimit:]
+ }
+
+ var builder strings.Builder
+ fmt.Fprintf(&builder, "-> Events:")
+ for _, event := range events {
+ var interval string
+ if event.Count > 1 {
+ interval = fmt.Sprintf("%s ago (x%d over %s)", translateTimestampSince(event.LastTimestamp), event.Count, translateTimestampSince(event.FirstTimestamp))
+ } else {
+ interval = fmt.Sprintf("%s ago", translateTimestampSince(event.FirstTimestamp))
+ if event.FirstTimestamp.IsZero() {
+ interval = fmt.Sprintf("%s ago", translateMicroTimestampSince(event.EventTime))
+ }
+ }
+ source := event.Source.Component
+ if source == "" {
+ source = event.ReportingController
+ }
+
+ fmt.Fprintf(&builder, "\n* %s reported %s: %s", source, interval, event.Message)
+ }
+
+ return builder.String()
+}
+
+// translateTimestampSince returns the elapsed time since timestamp in
+// human-readable approximation.
+func translateTimestampSince(timestamp metav1.Time) string {
+ if timestamp.IsZero() {
+ return ""
+ }
+
+ return duration.HumanDuration(time.Since(timestamp.Time))
+}
+
+// translateMicroTimestampSince returns the elapsed time since timestamp in
+// human-readable approximation.
+func translateMicroTimestampSince(timestamp metav1.MicroTime) string {
+ if timestamp.IsZero() {
+ return ""
+ }
+
+ return duration.HumanDuration(time.Since(timestamp.Time))
+}
+
+// MergeOwnerReferences merges the newReferences with the list of existing references.
+func MergeOwnerReferences(references []metav1.OwnerReference, newReferences ...metav1.OwnerReference) []metav1.OwnerReference {
+ uids := make(map[types.UID]struct{})
+ for _, reference := range references {
+ uids[reference.UID] = struct{}{}
+ }
+
+ for _, newReference := range newReferences {
+ if _, ok := uids[newReference.UID]; !ok {
+ references = append(references, newReference)
+ }
+ }
+
+ return references
+}
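+
+// Example (illustrative sketch): append an owner reference only if its UID is not yet
+// present on the object; the owner reference is assumed to come from the caller.
+func exampleAddOwner(obj metav1.Object, owner metav1.OwnerReference) {
+	obj.SetOwnerReferences(MergeOwnerReferences(obj.GetOwnerReferences(), owner))
+}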
+
+// OwnedBy checks if the given object's owner reference contains an entry with the provided attributes.
+func OwnedBy(obj runtime.Object, apiVersion, kind, name string, uid types.UID) bool {
+ acc, err := meta.Accessor(obj)
+ if err != nil {
+ return false
+ }
+
+	for _, ownerReference := range acc.GetOwnerReferences() {
+		if ownerReference.APIVersion == apiVersion &&
+			ownerReference.Kind == kind &&
+			ownerReference.Name == name &&
+			ownerReference.UID == uid {
+			return true
+		}
+	}
+
+ return false
+}
+
+// NewestObject returns the most recently created object based on the provided list object type. If a filter function
+// is provided then it will be applied for each object right after listing all objects. If no object remains then nil
+// is returned. The Items field in the list object will be populated with the result returned from the server after
+// applying the filter function (if provided).
+func NewestObject(ctx context.Context, c client.Client, listObj client.ObjectList, filterFn func(runtime.Object) bool, listOpts ...client.ListOption) (runtime.Object, error) {
+ if err := c.List(ctx, listObj, listOpts...); err != nil {
+ return nil, err
+ }
+
+ if filterFn != nil {
+ var items []runtime.Object
+
+ if err := meta.EachListItem(listObj, func(obj runtime.Object) error {
+ if filterFn(obj) {
+ items = append(items, obj)
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+
+ if err := meta.SetList(listObj, items); err != nil {
+ return nil, err
+ }
+ }
+
+ if meta.LenList(listObj) == 0 {
+ return nil, nil
+ }
+
+ ByCreationTimestamp().Sort(listObj)
+
+ items, err := meta.ExtractList(listObj)
+ if err != nil {
+ return nil, err
+ }
+
+ return items[meta.LenList(listObj)-1], nil
+}
+
+// NewestPodForDeployment returns the most recently created Pod object for the given deployment.
+func NewestPodForDeployment(ctx context.Context, c client.Client, deployment *appsv1.Deployment) (*corev1.Pod, error) {
+ listOpts := []client.ListOption{client.InNamespace(deployment.Namespace)}
+ if deployment.Spec.Selector != nil {
+ listOpts = append(listOpts, client.MatchingLabels(deployment.Spec.Selector.MatchLabels))
+ }
+
+ replicaSet, err := NewestObject(
+ ctx,
+ c,
+ &appsv1.ReplicaSetList{},
+ func(obj runtime.Object) bool {
+ return OwnedBy(obj, appsv1.SchemeGroupVersion.String(), "Deployment", deployment.Name, deployment.UID)
+ },
+ listOpts...,
+ )
+ if err != nil {
+ return nil, err
+ }
+ if replicaSet == nil {
+ return nil, nil
+ }
+
+ newestReplicaSet, ok := replicaSet.(*appsv1.ReplicaSet)
+ if !ok {
+ return nil, fmt.Errorf("object is not of type *appsv1.ReplicaSet but %T", replicaSet)
+ }
+
+ pod, err := NewestObject(
+ ctx,
+ c,
+ &corev1.PodList{},
+ func(obj runtime.Object) bool {
+ return OwnedBy(obj, appsv1.SchemeGroupVersion.String(), "ReplicaSet", newestReplicaSet.Name, newestReplicaSet.UID)
+ },
+ listOpts...,
+ )
+ if err != nil {
+ return nil, err
+ }
+ if pod == nil {
+ return nil, nil
+ }
+
+ newestPod, ok := pod.(*corev1.Pod)
+ if !ok {
+ return nil, fmt.Errorf("object is not of type *corev1.Pod but %T", pod)
+ }
+
+ return newestPod, nil
+}
+
+// MostRecentCompleteLogs returns the logs of the pod/container in case it is not running. If the pod/container is
+// running then the logs of the previous pod/container are returned.
+func MostRecentCompleteLogs(
+ ctx context.Context,
+ podInterface corev1client.PodInterface,
+ pod *corev1.Pod,
+ containerName string,
+ tailLines *int64,
+) (
+ string,
+ error,
+) {
+ previousLogs := false
+ for _, containerStatus := range pod.Status.ContainerStatuses {
+ if containerName == "" || containerStatus.Name == containerName {
+ previousLogs = containerStatus.State.Running != nil
+ break
+ }
+ }
+
+ logs, err := kubernetes.GetPodLogs(ctx, podInterface, pod.Name, &corev1.PodLogOptions{
+ Container: containerName,
+ TailLines: tailLines,
+ Previous: previousLogs,
+ })
+ if err != nil {
+ return "", err
+ }
+
+ return string(logs), nil
+}
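+
+// Example (illustrative sketch): fetch the most recent complete logs of the newest pod of a
+// deployment; the tail length of 100 lines is an assumed example value.
+func exampleDeploymentLogs(ctx context.Context, c client.Client, podInterface corev1client.PodInterface, deployment *appsv1.Deployment) (string, error) {
+	pod, err := NewestPodForDeployment(ctx, c, deployment)
+	if err != nil || pod == nil {
+		return "", err
+	}
+	tail := int64(100)
+	return MostRecentCompleteLogs(ctx, podInterface, pod, "", &tail)
+}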
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/leaderelection.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/leaderelection.go
new file mode 100644
index 0000000..5d84c35
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/leaderelection.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+ "fmt"
+
+ coordinationv1 "k8s.io/api/coordination/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/leaderelection/resourcelock"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ReadLeaderElectionRecord returns the leader election record for a given lock type and a namespace/name combination.
+func ReadLeaderElectionRecord(ctx context.Context, client client.Client, lock, namespace, name string) (*resourcelock.LeaderElectionRecord, error) {
+ switch lock {
+ case resourcelock.EndpointsResourceLock:
+ endpoint := &corev1.Endpoints{}
+ if err := client.Get(ctx, Key(namespace, name), endpoint); err != nil {
+ return nil, err
+ }
+ return leaderElectionRecordFromAnnotations(endpoint.Annotations)
+
+ case resourcelock.ConfigMapsResourceLock:
+ configmap := &corev1.ConfigMap{}
+ if err := client.Get(ctx, Key(namespace, name), configmap); err != nil {
+ return nil, err
+ }
+ return leaderElectionRecordFromAnnotations(configmap.Annotations)
+
+ case resourcelock.LeasesResourceLock:
+ lease := &coordinationv1.Lease{}
+ if err := client.Get(ctx, Key(namespace, name), lease); err != nil {
+ return nil, err
+ }
+ return resourcelock.LeaseSpecToLeaderElectionRecord(&lease.Spec), nil
+ }
+
+ return nil, fmt.Errorf("unknown lock type: %s", lock)
+}
+
+func leaderElectionRecordFromAnnotations(annotations map[string]string) (*resourcelock.LeaderElectionRecord, error) {
+ var leaderElectionRecord resourcelock.LeaderElectionRecord
+
+ leaderElection, ok := annotations[resourcelock.LeaderElectionRecordAnnotationKey]
+ if !ok {
+ return nil, fmt.Errorf("could not find key %q in annotations", resourcelock.LeaderElectionRecordAnnotationKey)
+ }
+
+ if err := json.Unmarshal([]byte(leaderElection), &leaderElectionRecord); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal leader election record: %+v", err)
+ }
+
+ return &leaderElectionRecord, nil
+}
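+
+// Example (illustrative sketch): read who currently holds a Lease-based leader election
+// lock; the namespace and lock name are assumed example values.
+func exampleCurrentLeader(ctx context.Context, c client.Client) (string, error) {
+	record, err := ReadLeaderElectionRecord(ctx, c, resourcelock.LeasesResourceLock, "kube-system", "my-controller-leader-election")
+	if err != nil {
+		return "", err
+	}
+	return record.HolderIdentity, nil
+}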
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/namespace.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/namespace.go
new file mode 100644
index 0000000..2d1f7c7
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/namespace.go
@@ -0,0 +1,90 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+	"context"
+
+	"github.com/gardener/gardener/pkg/client/kubernetes"
+	"github.com/gardener/gardener/pkg/logger"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	k8s "k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/util/retry"
+)
+
+func tryUpdateNamespace(
+ ctx context.Context,
+ k k8s.Interface,
+ backoff wait.Backoff,
+ meta metav1.ObjectMeta,
+ transform func(*corev1.Namespace) (*corev1.Namespace, error),
+ updateFunc func(k k8s.Interface, namespace *corev1.Namespace) (*corev1.Namespace, error),
+ exitEarlyFunc func(cur, updated *corev1.Namespace) bool,
+) (*corev1.Namespace, error) {
+ var (
+ result *corev1.Namespace
+ attempt int
+ )
+
+ err := retry.RetryOnConflict(backoff, func() (err error) {
+ attempt++
+ cur, err := k.CoreV1().Namespaces().Get(ctx, meta.Name, kubernetes.DefaultGetOptions())
+ if err != nil {
+ return err
+ }
+
+ updated, err := transform(cur.DeepCopy())
+ if err != nil {
+ return err
+ }
+
+ if exitEarlyFunc(cur, updated) {
+ result = cur
+ return nil
+ }
+
+ result, err = updateFunc(k, updated)
+ if err != nil {
+ logger.Logger.Errorf("Attempt %d failed to update Namespace %s due to %v", attempt, cur.Name, err)
+ }
+ return
+ })
+ if err != nil {
+ logger.Logger.Errorf("Failed to update Namespace %s after %d attempts due to %v", meta.Name, attempt, err)
+ }
+
+ return result, err
+}
+
+// TryUpdateNamespace tries to update a namespace and retries the operation with the given backoff.
+func TryUpdateNamespace(ctx context.Context, k k8s.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*corev1.Namespace) (*corev1.Namespace, error)) (*corev1.Namespace, error) {
+ return tryUpdateNamespace(ctx, k, backoff, meta, transform, func(k k8s.Interface, namespace *corev1.Namespace) (*corev1.Namespace, error) {
+ return k.CoreV1().Namespaces().Update(ctx, namespace, kubernetes.DefaultUpdateOptions())
+ }, func(cur, updated *corev1.Namespace) bool {
+ return equality.Semantic.DeepEqual(cur, updated)
+ })
+}
+
+// TryUpdateNamespaceLabels tries to update a namespace's labels and retries the operation with the given backoff.
+func TryUpdateNamespaceLabels(ctx context.Context, k k8s.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*corev1.Namespace) (*corev1.Namespace, error)) (*corev1.Namespace, error) {
+ return tryUpdateNamespace(ctx, k, backoff, meta, transform, func(k k8s.Interface, namespace *corev1.Namespace) (*corev1.Namespace, error) {
+ return k.CoreV1().Namespaces().Update(ctx, namespace, kubernetes.DefaultUpdateOptions())
+ }, func(cur, updated *corev1.Namespace) bool {
+ return equality.Semantic.DeepEqual(cur.Labels, updated.Labels)
+ })
+}
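+
+// Example (illustrative sketch): add a label to a namespace with conflict retries; the
+// label key/value and the use of retry.DefaultBackoff are assumed example choices.
+func exampleLabelNamespace(ctx context.Context, k k8s.Interface, name string) error {
+	_, err := TryUpdateNamespaceLabels(ctx, k, retry.DefaultBackoff, metav1.ObjectMeta{Name: name},
+		func(ns *corev1.Namespace) (*corev1.Namespace, error) {
+			if ns.Labels == nil {
+				ns.Labels = map[string]string{}
+			}
+			ns.Labels["example/managed"] = "true"
+			return ns, nil
+		})
+	return err
+}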
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/node.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/node.go
new file mode 100644
index 0000000..84af36b
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/node.go
@@ -0,0 +1,59 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/labels"
+)
+
+// NodeSource is a function that produces a slice of Nodes or an error.
+type NodeSource func() ([]*corev1.Node, error)
+
+// NodeLister is a lister of Nodes.
+type NodeLister interface {
+ // List lists all Nodes that match the given selector.
+ List(selector labels.Selector) ([]*corev1.Node, error)
+}
+
+type nodeLister struct {
+ source NodeSource
+}
+
+// NewNodeLister creates a new NodeLister from the given NodeSource.
+func NewNodeLister(source NodeSource) NodeLister {
+ return &nodeLister{source: source}
+}
+
+func filterNodes(source NodeSource, filter func(*corev1.Node) bool) ([]*corev1.Node, error) {
+ nodes, err := source()
+ if err != nil {
+ return nil, err
+ }
+
+ var out []*corev1.Node
+ for _, node := range nodes {
+ if filter(node) {
+ out = append(out, node)
+ }
+ }
+ return out, nil
+}
+
+func (d *nodeLister) List(selector labels.Selector) ([]*corev1.Node, error) {
+ return filterNodes(d.source, func(node *corev1.Node) bool {
+ return selector.Matches(labels.Set(node.Labels))
+ })
+}
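+
+// Example (illustrative sketch): build a NodeLister over an in-memory slice of nodes and
+// list all of them; the static source is an assumed example, not provided by this package.
+func exampleStaticNodeLister(nodes []*corev1.Node) ([]*corev1.Node, error) {
+	lister := NewNodeLister(func() ([]*corev1.Node, error) {
+		return nodes, nil
+	})
+	return lister.List(labels.Everything())
+}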
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/object.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/object.go
new file mode 100644
index 0000000..be0080d
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/object.go
@@ -0,0 +1,45 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ObjectName returns the name of the given object in the format namespace/name.
+func ObjectName(obj client.Object) string {
+ return client.ObjectKeyFromObject(obj).String()
+}
+
+// DeleteObjects deletes a list of Kubernetes objects.
+func DeleteObjects(ctx context.Context, c client.Client, objects ...client.Object) error {
+ for _, obj := range objects {
+ if err := DeleteObject(ctx, c, obj); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// DeleteObject deletes a Kubernetes object. It ignores 'not found' and 'no match' errors.
+func DeleteObject(ctx context.Context, c client.Client, object client.Object) error {
+ if err := c.Delete(ctx, object); client.IgnoreNotFound(err) != nil && !meta.IsNoMatchError(err) {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/patch.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/patch.go
new file mode 100644
index 0000000..1bb9a65
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/patch.go
@@ -0,0 +1,119 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+
+ jsoniter "github.com/json-iterator/go"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/strategicpatch"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var json = jsoniter.ConfigFastest
+
+// TryPatch tries to apply the given transformation function onto the given object, and to patch it afterwards with optimistic locking.
+// It retries the patch with an exponential backoff.
+func TryPatch(ctx context.Context, backoff wait.Backoff, c client.Client, obj client.Object, transform func() error) error {
+ return tryPatch(ctx, backoff, c, obj, c.Patch, transform)
+}
+
+// TryPatchStatus tries to apply the given transformation function onto the given object, and to patch its
+// status afterwards with optimistic locking. It retries the status patch with an exponential backoff.
+func TryPatchStatus(ctx context.Context, backoff wait.Backoff, c client.Client, obj client.Object, transform func() error) error {
+ return tryPatch(ctx, backoff, c, obj, c.Status().Patch, transform)
+}
+
+func tryPatch(ctx context.Context, backoff wait.Backoff, c client.Client, obj client.Object, patchFunc func(context.Context, client.Object, client.Patch, ...client.PatchOption) error, transform func() error) error {
+ resetCopy := obj.DeepCopyObject()
+ return exponentialBackoff(ctx, backoff, func() (bool, error) {
+ if err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
+ return false, err
+ }
+ beforeTransform := obj.DeepCopyObject()
+ if err := transform(); err != nil {
+ return false, err
+ }
+
+ if reflect.DeepEqual(obj, beforeTransform) {
+ return true, nil
+ }
+
+ patch := client.MergeFromWithOptions(beforeTransform, client.MergeFromWithOptimisticLock{})
+
+ if err := patchFunc(ctx, obj, patch); err != nil {
+ if apierrors.IsConflict(err) {
+ reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(resetCopy).Elem())
+ return false, nil
+ }
+ return false, err
+ }
+ return true, nil
+ })
+}
+
+// CreateTwoWayMergePatch creates a two-way merge patch of the given objects.
+// The two objects have to be pointers of the same type, both implementing metav1.Object.
+func CreateTwoWayMergePatch(obj1 metav1.Object, obj2 metav1.Object) ([]byte, error) {
+ t1, t2 := reflect.TypeOf(obj1), reflect.TypeOf(obj2)
+ if t1 != t2 {
+ return nil, fmt.Errorf("cannot patch two objects of different type: %q - %q", t1, t2)
+ }
+ if t1.Kind() != reflect.Ptr {
+ return nil, fmt.Errorf("type has to be of kind pointer but got %q", t1)
+ }
+
+ obj1Data, err := json.Marshal(obj1)
+ if err != nil {
+ return nil, err
+ }
+
+ obj2Data, err := json.Marshal(obj2)
+ if err != nil {
+ return nil, err
+ }
+
+ dataStructType := t1.Elem()
+ dataStruct := reflect.New(dataStructType).Elem().Interface()
+
+ return strategicpatch.CreateTwoWayMergePatch(obj1Data, obj2Data, dataStruct)
+}
+
+// IsEmptyPatch checks if the given patch is empty. A patch is considered empty if it is
+// the empty string or if it json-decodes to an empty json map.
+func IsEmptyPatch(patch []byte) bool {
+ if len(strings.TrimSpace(string(patch))) == 0 {
+ return true
+ }
+
+ var m map[string]interface{}
+ if err := json.Unmarshal(patch, &m); err != nil {
+ return false
+ }
+
+ return len(m) == 0
+}
+
+// SubmitEmptyPatch submits an empty patch to the given `obj` with the given `client` instance.
+func SubmitEmptyPatch(ctx context.Context, c client.Client, obj client.Object) error {
+ return c.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, []byte("{}")))
+}
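+
+// Example (illustrative sketch): compute a two-way merge patch between two metadata-only
+// objects and use IsEmptyPatch to decide whether anything changed; PartialObjectMetadata is
+// used here only as an assumed example type.
+func exampleMetadataChanged(oldMeta, newMeta *metav1.PartialObjectMetadata) (bool, error) {
+	patch, err := CreateTwoWayMergePatch(oldMeta, newMeta)
+	if err != nil {
+		return false, err
+	}
+	return !IsEmptyPatch(patch), nil
+}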
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/project.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/project.go
new file mode 100644
index 0000000..5425114
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/project.go
@@ -0,0 +1,91 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ gardencore "github.com/gardener/gardener/pkg/client/core/clientset/versioned"
+ "github.com/gardener/gardener/pkg/client/kubernetes"
+ "github.com/gardener/gardener/pkg/logger"
+
+ "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/util/retry"
+)
+
+func tryUpdateProject(
+ ctx context.Context,
+ g gardencore.Interface,
+ backoff wait.Backoff,
+ meta metav1.ObjectMeta,
+ transform func(*gardencorev1beta1.Project) (*gardencorev1beta1.Project, error),
+ updateFunc func(g gardencore.Interface, project *gardencorev1beta1.Project) (*gardencorev1beta1.Project, error),
+ compare func(cur, updated *gardencorev1beta1.Project) bool,
+) (*gardencorev1beta1.Project, error) {
+ var (
+ result *gardencorev1beta1.Project
+ attempt int
+ )
+
+ err := retry.RetryOnConflict(backoff, func() (err error) {
+ attempt++
+ cur, err := g.CoreV1beta1().Projects().Get(ctx, meta.Name, kubernetes.DefaultGetOptions())
+ if err != nil {
+ return err
+ }
+
+ updated, err := transform(cur.DeepCopy())
+ if err != nil {
+ return err
+ }
+
+ if compare(cur, updated) {
+ result = cur
+ return nil
+ }
+
+ result, err = updateFunc(g, updated)
+ if err != nil {
+ logger.Logger.Errorf("Attempt %d failed to update Project %s due to %v", attempt, cur.Name, err)
+ }
+ return
+ })
+ if err != nil {
+ logger.Logger.Errorf("Failed to update Project %s after %d attempts due to %v", meta.Name, attempt, err)
+ }
+
+ return result, err
+}
+
+// TryUpdateProject tries to update a project and retries the operation with the given backoff.
+func TryUpdateProject(ctx context.Context, g gardencore.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*gardencorev1beta1.Project) (*gardencorev1beta1.Project, error)) (*gardencorev1beta1.Project, error) {
+ return tryUpdateProject(ctx, g, backoff, meta, transform, func(g gardencore.Interface, project *gardencorev1beta1.Project) (*gardencorev1beta1.Project, error) {
+ return g.CoreV1beta1().Projects().Update(ctx, project, kubernetes.DefaultUpdateOptions())
+ }, func(cur, updated *gardencorev1beta1.Project) bool {
+ return equality.Semantic.DeepEqual(cur, updated)
+ })
+}
+
+// TryUpdateProjectStatus tries to update a project's status and retries the operation with the given backoff.
+func TryUpdateProjectStatus(ctx context.Context, g gardencore.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*gardencorev1beta1.Project) (*gardencorev1beta1.Project, error)) (*gardencorev1beta1.Project, error) {
+ return tryUpdateProject(ctx, g, backoff, meta, transform, func(g gardencore.Interface, project *gardencorev1beta1.Project) (*gardencorev1beta1.Project, error) {
+ return g.CoreV1beta1().Projects().UpdateStatus(ctx, project, kubernetes.DefaultUpdateOptions())
+ }, func(cur, updated *gardencorev1beta1.Project) bool {
+ return equality.Semantic.DeepEqual(cur.Status, updated.Status)
+ })
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/seed.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/seed.go
new file mode 100644
index 0000000..3f26afb
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/seed.go
@@ -0,0 +1,116 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+ "context"
+
+ gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+ gardencore "github.com/gardener/gardener/pkg/client/core/clientset/versioned"
+ "github.com/gardener/gardener/pkg/client/kubernetes"
+ "github.com/gardener/gardener/pkg/logger"
+
+ "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/util/retry"
+)
+
+func tryUpdateSeed(
+ ctx context.Context,
+ g gardencore.Interface,
+ backoff wait.Backoff,
+ meta metav1.ObjectMeta,
+ transform func(*gardencorev1beta1.Seed) (*gardencorev1beta1.Seed, error),
+ updateFunc func(g gardencore.Interface, seed *gardencorev1beta1.Seed) (*gardencorev1beta1.Seed, error),
+ equalFunc func(cur, updated *gardencorev1beta1.Seed) bool,
+) (*gardencorev1beta1.Seed, error) {
+
+ var (
+ result *gardencorev1beta1.Seed
+ attempt int
+ )
+ err := retry.RetryOnConflict(backoff, func() (err error) {
+ attempt++
+ cur, err := g.CoreV1beta1().Seeds().Get(ctx, meta.Name, kubernetes.DefaultGetOptions())
+ if err != nil {
+ return err
+ }
+
+ updated, err := transform(cur.DeepCopy())
+ if err != nil {
+ return err
+ }
+
+ if equalFunc(cur, updated) {
+ result = cur
+ return nil
+ }
+
+ result, err = updateFunc(g, updated)
+ if err != nil {
+ logger.Logger.Errorf("Attempt %d failed to update Seed %s due to %v", attempt, cur.Name, err)
+ }
+ return
+ })
+ if err != nil {
+ logger.Logger.Errorf("Failed to update Seed %s after %d attempts due to %v", meta.Name, attempt, err)
+ }
+ return result, err
+}
+
+// TryUpdateSeedWithEqualFunc tries to update the Seed matching the given meta.
+// It retries with the given backoff characteristics as long as it gets Conflict errors.
+// The transformation function is applied to the current state of the Seed object. If the equal
+// func concludes a semantically equal Seed, no update is done and the operation returns normally.
+func TryUpdateSeedWithEqualFunc(ctx context.Context, g gardencore.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*gardencorev1beta1.Seed) (*gardencorev1beta1.Seed, error), equal func(cur, updated *gardencorev1beta1.Seed) bool) (*gardencorev1beta1.Seed, error) {
+ return tryUpdateSeed(ctx, g, backoff, meta, transform, func(g gardencore.Interface, seed *gardencorev1beta1.Seed) (*gardencorev1beta1.Seed, error) {
+ return g.CoreV1beta1().Seeds().Update(ctx, seed, kubernetes.DefaultUpdateOptions())
+ }, equal)
+}
+
+// TryUpdateSeed tries to update the Seed matching the given meta.
+// It retries with the given backoff characteristics as long as it gets Conflict errors.
+// The transformation function is applied to the current state of the Seed object. If the transformation
+// yields a semantically equal Seed, no update is done and the operation returns normally.
+func TryUpdateSeed(ctx context.Context, g gardencore.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*gardencorev1beta1.Seed) (*gardencorev1beta1.Seed, error)) (*gardencorev1beta1.Seed, error) {
+ return TryUpdateSeedWithEqualFunc(ctx, g, backoff, meta, transform, func(cur, updated *gardencorev1beta1.Seed) bool {
+ return equality.Semantic.DeepEqual(cur, updated)
+ })
+}
+
+// TryUpdateSeedStatus tries to update the status of the Seed matching the given meta.
+// It retries with the given backoff characteristics as long as it gets Conflict errors.
+// The transformation function is applied to the current state of the Seed object. If the transformation
+// yields a semantically equal Seed (regarding Status), no update is done and the operation returns normally.
+func TryUpdateSeedStatus(ctx context.Context, g gardencore.Interface, backoff wait.Backoff, meta metav1.ObjectMeta, transform func(*gardencorev1beta1.Seed) (*gardencorev1beta1.Seed, error)) (*gardencorev1beta1.Seed, error) {
+ return tryUpdateSeed(ctx, g, backoff, meta, transform, func(g gardencore.Interface, seed *gardencorev1beta1.Seed) (*gardencorev1beta1.Seed, error) {
+ return g.CoreV1beta1().Seeds().UpdateStatus(ctx, seed, kubernetes.DefaultUpdateOptions())
+ }, func(cur, updated *gardencorev1beta1.Seed) bool {
+ return equality.Semantic.DeepEqual(cur.Status, updated.Status)
+ })
+}
+
+// TryUpdateSeedConditions tries to update the status of the seed matching the given